1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 *
4 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
5 *
6 * TODO: Merge attr_set_size/attr_data_get_block/attr_allocate_frame?
7 */
8
9 #include <linux/fs.h>
10 #include <linux/slab.h>
11 #include <linux/kernel.h>
12
13 #include "debug.h"
14 #include "ntfs.h"
15 #include "ntfs_fs.h"
16
/*
 * NTFS_MIN_LOG2_OF_CLUMP/NTFS_MAX_LOG2_OF_CLUMP may be defined externally
 * to tune the preallocation algorithm.
 */
21 #ifndef NTFS_MIN_LOG2_OF_CLUMP
22 #define NTFS_MIN_LOG2_OF_CLUMP 16
23 #endif
24
25 #ifndef NTFS_MAX_LOG2_OF_CLUMP
26 #define NTFS_MAX_LOG2_OF_CLUMP 26
27 #endif
28
29 // 16M
30 #define NTFS_CLUMP_MIN (1 << (NTFS_MIN_LOG2_OF_CLUMP + 8))
31 // 16G
32 #define NTFS_CLUMP_MAX (1ull << (NTFS_MAX_LOG2_OF_CLUMP + 8))
33
/*
 * Round @size up to the next multiple of its "clump" (a power of two
 * chosen from the size itself, clamped to [NTFS_MIN_LOG2_OF_CLUMP,
 * NTFS_MAX_LOG2_OF_CLUMP]).  Used to pick a preallocation target.
 */
static inline u64 get_pre_allocated(u64 size)
{
	u8 shift;

	if (size <= NTFS_CLUMP_MIN)
		shift = NTFS_MIN_LOG2_OF_CLUMP;
	else if (size >= NTFS_CLUMP_MAX)
		shift = NTFS_MAX_LOG2_OF_CLUMP;
	else
		shift = NTFS_MIN_LOG2_OF_CLUMP - 1 +
			__ffs(size >> (8 + NTFS_MIN_LOG2_OF_CLUMP));

	/* The clump is always exactly 1 << shift; round @size up to it. */
	return ((size + (1u << shift) - 1) >> shift) << shift;
}
56
57 /*
58 * attr_load_runs - Load all runs stored in @attr.
59 */
attr_load_runs(struct ATTRIB * attr,struct ntfs_inode * ni,struct runs_tree * run,const CLST * vcn)60 static int attr_load_runs(struct ATTRIB *attr, struct ntfs_inode *ni,
61 struct runs_tree *run, const CLST *vcn)
62 {
63 int err;
64 CLST svcn = le64_to_cpu(attr->nres.svcn);
65 CLST evcn = le64_to_cpu(attr->nres.evcn);
66 u32 asize;
67 u16 run_off;
68
69 if (svcn >= evcn + 1 || run_is_mapped_full(run, svcn, evcn))
70 return 0;
71
72 if (vcn && (evcn < *vcn || *vcn < svcn))
73 return -EINVAL;
74
75 asize = le32_to_cpu(attr->size);
76 run_off = le16_to_cpu(attr->nres.run_off);
77
78 if (run_off > asize)
79 return -EINVAL;
80
81 err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn,
82 vcn ? *vcn : svcn, Add2Ptr(attr, run_off),
83 asize - run_off);
84 if (err < 0)
85 return err;
86
87 return 0;
88 }
89
90 /*
91 * run_deallocate_ex - Deallocate clusters.
92 */
static int run_deallocate_ex(struct ntfs_sb_info *sbi, struct runs_tree *run,
			     CLST vcn, CLST len, CLST *done, bool trim,
			     struct runs_tree *run_da)
{
	int err = 0;
	CLST vcn_next, vcn0 = vcn, lcn, clen, dn = 0;
	size_t idx;

	if (!len)
		goto out;

	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
		err = -EINVAL;
		/*
		 * Fix: 'err' is now set at each goto site instead of at the
		 * label, so the -ENOMEM from the run_remove_range() failure
		 * below is no longer clobbered by a blanket -EINVAL.
		 */
failed:
		run_truncate(run, vcn0);
		goto out;
	}

	for (;;) {
		if (clen > len)
			clen = len;

		if (!clen) {
			err = -EINVAL;
			goto out;
		}

		if (lcn != SPARSE_LCN) {
			if (sbi) {
				/* mark bitmap range [lcn + clen) as free and trim clusters. */
				mark_as_free_ex(sbi, lcn, clen, trim);

				if (run_da) {
					CLST da_len;

					if (!run_remove_range(run_da, vcn, clen,
							      &da_len)) {
						err = -ENOMEM;
						goto failed;
					}
					ntfs_sub_da(sbi, da_len);
				}
			}
			/* Only real (non-sparse) clusters count as deallocated. */
			dn += clen;
		}

		len -= clen;
		if (!len)
			break;

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next) {
			/* Save memory - don't load entire run. */
			err = -EINVAL;
			goto failed;
		}
	}

out:
	if (done)
		*done += dn;

	return err;
}
156
157 /*
158 * attr_allocate_clusters - Find free space, mark it as used and store in @run.
159 */
int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
			   struct runs_tree *run_da, CLST vcn, CLST lcn,
			   CLST len, CLST *pre_alloc, enum ALLOCATE_OPT opt,
			   CLST *alen, const size_t fr, CLST *new_lcn,
			   CLST *new_len)
{
	int err;
	/* 'pre' - extra clusters to preallocate beyond the requested 'len'. */
	CLST flen, vcn0 = vcn, pre = pre_alloc ? *pre_alloc : 0;
	size_t cnt = run->count;

	for (;;) {
		err = ntfs_look_for_free_space(sbi, lcn, len + pre, &lcn, &flen,
					       opt);

		/* Out of space: retry once without the preallocation extra. */
		if (err == -ENOSPC && pre) {
			pre = 0;
			if (*pre_alloc)
				*pre_alloc = 0;
			continue;
		}

		if (err == -ENOSPC && new_len && vcn - vcn0) {
			/* Keep already allocated clusters. */
			*alen = vcn - vcn0;
			return 0;
		}

		if (err)
			goto out;

		if (vcn == vcn0) {
			/* Return the first fragment. */
			if (new_lcn)
				*new_lcn = lcn;
			if (new_len)
				*new_len = flen;
		}

		/* Add new fragment into run storage. */
		if (!run_add_entry(run, vcn, lcn, flen, opt & ALLOCATE_MFT)) {
undo_alloc:
			/* Undo last 'ntfs_look_for_free_space' */
			/*
			 * NOTE(review): this frees 'len' clusters, but the
			 * last lookup reserved only 'flen' - confirm whether
			 * 'flen' was intended here.
			 */
			mark_as_free_ex(sbi, lcn, len, false);
			err = -ENOMEM;
			goto out;
		}

		if (run_da) {
			CLST da_len;
			/* Allocation consumes part of the delalloc reservation. */
			if (!run_remove_range(run_da, vcn, flen, &da_len)) {
				goto undo_alloc;
			}
			ntfs_sub_da(sbi, da_len);
		}

		if (opt & ALLOCATE_ZERO) {
			u8 shift = sbi->cluster_bits - SECTOR_SHIFT;

			err = blkdev_issue_zeroout(sbi->sb->s_bdev,
						   (sector_t)lcn << shift,
						   (sector_t)flen << shift,
						   GFP_NOFS, 0);
			if (err)
				goto out;
		}

		vcn += flen;

		/* Stop when fully allocated, or when the fragment budget 'fr'
		 * (or a one-fragment/MFT policy) is exhausted. */
		if (flen >= len || (opt & ALLOCATE_MFT) ||
		    (opt & ALLOCATE_ONE_FR) || (fr && run->count - cnt >= fr)) {
			*alen = vcn - vcn0;
			return 0;
		}

		len -= flen;
	}

out:
	/* Undo 'ntfs_look_for_free_space' */
	if (vcn - vcn0) {
		run_deallocate_ex(sbi, run, vcn0, vcn - vcn0, NULL, false,
				  run_da);
		run_truncate(run, vcn0);
	}

	return err;
}
247
/*
 * attr_make_nonresident
 *
 * If @page is not NULL, it already contains resident data
 * and is locked (called from ni_write_frame()).
 */
int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
			  struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
			  u64 new_size, struct runs_tree *run,
			  struct ATTRIB **ins_attr, struct page *page)
{
	struct ntfs_sb_info *sbi;
	struct ATTRIB *attr_s;
	struct MFT_REC *rec;
	u32 used, asize, rsize, aoff;
	bool is_data;
	CLST len, alen;
	char *next;
	int err;

	/* NOTE(review): @new_size is not referenced in this body; all sizes
	 * come from attr->res.data_size - confirm the parameter is needed. */

	/* Already nonresident: nothing to convert. */
	if (attr->non_res) {
		*ins_attr = attr;
		return 0;
	}

	sbi = mi->sbi;
	rec = mi->mrec;
	attr_s = NULL;
	used = le32_to_cpu(rec->used);
	asize = le32_to_cpu(attr->size);
	next = Add2Ptr(attr, asize);
	aoff = PtrOffset(rec, attr);
	rsize = le32_to_cpu(attr->res.data_size);
	is_data = attr->type == ATTR_DATA && !attr->name_len;

	/* len - how many clusters required to store 'rsize' bytes */
	if (is_attr_compressed(attr)) {
		/* Compressed data is sized in compression units (frames). */
		u8 shift = sbi->cluster_bits + NTFS_LZNT_CUNIT;
		len = ((rsize + (1u << shift) - 1) >> shift) << NTFS_LZNT_CUNIT;
	} else {
		len = bytes_to_cluster(sbi, rsize);
	}

	run_init(run);

	/* Make a copy of original attribute. */
	attr_s = kmemdup(attr, asize, GFP_NOFS);
	if (!attr_s) {
		err = -ENOMEM;
		goto out;
	}

	if (!len) {
		/* Empty resident -> Empty nonresident. */
		alen = 0;
	} else {
		const char *data = resident_data(attr);

		err = attr_allocate_clusters(sbi, run, NULL, 0, 0, len, NULL,
					     ALLOCATE_DEF, &alen, 0, NULL,
					     NULL);
		if (err)
			goto out1;

		if (!rsize) {
			/* Empty resident -> Non empty nonresident. */
		} else if (!is_data) {
			/* Non-data attributes are written straight to disk. */
			err = ntfs_sb_write_run(sbi, run, 0, data, rsize, 0);
			if (err)
				goto out2;
		} else if (!page) {
			/* Data attribute: stage the bytes in the page cache. */
			struct address_space *mapping = ni->vfs_inode.i_mapping;
			struct folio *folio;

			folio = __filemap_get_folio(
				mapping, 0, FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
				mapping_gfp_mask(mapping));
			if (IS_ERR(folio)) {
				err = PTR_ERR(folio);
				goto out2;
			}
			folio_fill_tail(folio, 0, data, rsize);
			folio_mark_uptodate(folio);
			folio_mark_dirty(folio);
			folio_unlock(folio);
			folio_put(folio);
		}
	}

	/* Remove original attribute. */
	used -= asize;
	memmove(attr, Add2Ptr(attr, asize), used - aoff);
	rec->used = cpu_to_le32(used);
	mi->dirty = true;
	if (le)
		al_remove_le(ni, le);

	err = ni_insert_nonresident(ni, attr_s->type, attr_name(attr_s),
				    attr_s->name_len, run, 0, alen,
				    attr_s->flags, &attr, NULL, NULL);
	if (err)
		goto out3;

	kfree(attr_s);
	attr->nres.data_size = cpu_to_le64(rsize);
	attr->nres.valid_size = attr->nres.data_size;

	*ins_attr = attr;

	if (is_data)
		ni->ni_flags &= ~NI_FLAG_RESIDENT;

	/* Resident attribute becomes non resident. */
	return 0;

out3:
	/* Undo removal: put the saved resident attribute back in the record. */
	attr = Add2Ptr(rec, aoff);
	memmove(next, attr, used - aoff);
	memcpy(attr, attr_s, asize);
	rec->used = cpu_to_le32(used + asize);
	mi->dirty = true;
out2:
	/* Undo: do not trim new allocated clusters. */
	run_deallocate(sbi, run, false);
	run_close(run);
out1:
	kfree(attr_s);
out:
	return err;
}
378
379 /*
380 * attr_set_size_res - Helper for attr_set_size().
381 */
static int attr_set_size_res(struct ntfs_inode *ni, struct ATTRIB *attr,
			     struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
			     u64 new_size, struct runs_tree *run,
			     struct ATTRIB **ins_attr)
{
	struct ntfs_sb_info *sbi = mi->sbi;
	struct MFT_REC *rec = mi->mrec;
	u32 used = le32_to_cpu(rec->used);
	u32 asize = le32_to_cpu(attr->size);
	u32 aoff = PtrOffset(rec, attr);
	u32 rsize = le32_to_cpu(attr->res.data_size);
	u32 tail_len = used - aoff - asize;
	char *tail_ptr = Add2Ptr(attr, asize);
	/* Resident bodies are 8-byte aligned inside the MFT record. */
	s64 delta = ALIGN(new_size, 8) - ALIGN(rsize, 8);

	if (delta > 0) {
		/* Growing: record may overflow -> convert to nonresident. */
		if (used + delta > sbi->max_bytes_per_attr)
			return attr_make_nonresident(ni, attr, le, mi, new_size,
						     run, ins_attr, NULL);

		memmove(tail_ptr + delta, tail_ptr, tail_len);
		memset(tail_ptr, 0, delta);
	} else if (delta < 0) {
		/* Shrinking: pull the tail of the record back. */
		memmove(tail_ptr + delta, tail_ptr, tail_len);
	}

	/* Zero the newly exposed part of the resident data. */
	if (new_size > rsize)
		memset(Add2Ptr(resident_data(attr), rsize), 0,
		       new_size - rsize);

	rec->used = cpu_to_le32(used + delta);
	attr->size = cpu_to_le32(asize + delta);
	attr->res.data_size = cpu_to_le32(new_size);
	mi->dirty = true;
	*ins_attr = attr;

	return 0;
}
420
421 /*
422 * attr_set_size_ex - Change the size of attribute.
423 *
424 * Extend:
425 * - Sparse/compressed: No allocated clusters.
426 * - Normal: Append allocated and preallocated new clusters.
427 * Shrink:
428 * - No deallocate if @keep_prealloc is set.
429 */
int attr_set_size_ex(struct ntfs_inode *ni, enum ATTR_TYPE type,
		     const __le16 *name, u8 name_len, struct runs_tree *run,
		     u64 new_size, const u64 *new_valid, bool keep_prealloc,
		     struct ATTRIB **ret, bool no_da)
{
	int err = 0;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	u8 cluster_bits = sbi->cluster_bits;
	bool is_mft = ni->mi.rno == MFT_REC_MFT && type == ATTR_DATA &&
		      !name_len;
	u64 old_valid, old_size, old_alloc, new_alloc_tmp;
	u64 new_alloc = 0;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST alen, vcn, lcn, new_alen, old_alen, svcn, evcn;
	CLST next_svcn, pre_alloc = -1, done = 0;
	bool is_ext = false, is_bad = false;
	bool dirty = false;
	/* Delalloc run is used only when operating on the file's main run. */
	struct runs_tree *run_da = run == &ni->file.run ? &ni->file.run_da :
							  NULL;
	bool da = !is_mft && sbi->options->delalloc && run_da && !no_da;
	u32 align;
	struct MFT_REC *rec;

	/* Full restart point: taken whenever record layout may have changed. */
again:
	alen = 0;
	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len, NULL,
			      &mi_b);
	if (!attr_b) {
		err = -ENOENT;
		goto bad_inode;
	}

	if (!attr_b->non_res) {
		err = attr_set_size_res(ni, attr_b, le_b, mi_b, new_size, run,
					&attr_b);
		if (err)
			return err;

		/* Return if file is still resident. */
		if (!attr_b->non_res) {
			dirty = true;
			goto ok1;
		}

		/* Layout of records may be changed, so do a full search. */
		goto again;
	}

	is_ext = is_attr_ext(attr_b);
	align = sbi->cluster_size;
	if (is_ext) {
		/* Sparse/compressed: align to the compression unit,
		 * and never preallocate or delay-allocate. */
		align <<= attr_b->nres.c_unit;
		keep_prealloc = false;
		da = false;
	}

	old_valid = le64_to_cpu(attr_b->nres.valid_size);
	old_size = le64_to_cpu(attr_b->nres.data_size);
	old_alloc = le64_to_cpu(attr_b->nres.alloc_size);

again_1:
	old_alen = old_alloc >> cluster_bits;

	new_alloc = (new_size + align - 1) & ~(u64)(align - 1);
	new_alen = new_alloc >> cluster_bits;

	if (keep_prealloc && new_size < old_size) {
		/* Shrink data size only; keep the allocated clusters. */
		attr_b->nres.data_size = cpu_to_le64(new_size);
		mi_b->dirty = dirty = true;
		goto ok;
	}

	if (da &&
	    (vcn = old_alen + run_len(&ni->file.run_da), new_alen > vcn)) {
		/* Resize up normal file. Delay new clusters allocation. */
		alen = new_alen - vcn;

		if (ntfs_check_free_space(sbi, alen, 0, true)) {
			if (!run_add_entry(&ni->file.run_da, vcn, SPARSE_LCN,
					   alen, false)) {
				err = -ENOMEM;
				goto out;
			}

			ntfs_add_da(sbi, alen);
			goto ok1;
		}
		/* Not enough free space for delalloc: fall through and
		 * allocate for real below. */
	}

	if (!keep_prealloc && run_da && run_da->count &&
	    (vcn = run_get_max_vcn(run_da), new_alen < vcn)) {
		/* Shrink delayed clusters. */

		/* Try to remove fragment from delay allocated run. */
		if (!run_remove_range(run_da, new_alen, vcn - new_alen,
				      &alen)) {
			err = -ENOMEM;
			goto out;
		}

		ntfs_sub_da(sbi, alen);
	}

	vcn = old_alen - 1;

	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn = le64_to_cpu(attr_b->nres.evcn);

	if (svcn <= vcn && vcn <= evcn) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else if (!le_b) {
		err = -EINVAL;
		goto bad_inode;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, type, name, name_len, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto bad_inode;
		}

next_le_1:
		svcn = le64_to_cpu(attr->nres.svcn);
		evcn = le64_to_cpu(attr->nres.evcn);
	}
	/*
	 * Here we have:
	 * attr,mi,le - last attribute segment (containing 'vcn').
	 * attr_b,mi_b,le_b - base (primary) attribute segment.
	 */
next_le:
	rec = mi->mrec;
	err = attr_load_runs(attr, ni, run, NULL);
	if (err)
		goto out;

	if (new_size > old_size) {
		CLST to_allocate;
		size_t free;

		if (new_alloc <= old_alloc) {
			/* Grow within existing allocation: data size only. */
			attr_b->nres.data_size = cpu_to_le64(new_size);
			mi_b->dirty = dirty = true;
			goto ok;
		}

		/*
		 * Add clusters. In simple case we have to:
		 * - allocate space (vcn, lcn, len)
		 * - update packed run in 'mi'
		 * - update attr->nres.evcn
		 * - update attr_b->nres.data_size/attr_b->nres.alloc_size
		 */
		to_allocate = new_alen - old_alen;
add_alloc_in_same_attr_seg:
		lcn = 0;
		if (is_mft) {
			/* MFT allocates clusters from MFT zone. */
			pre_alloc = 0;
		} else if (is_ext) {
			/* No preallocate for sparse/compress. */
			pre_alloc = 0;
		} else if (pre_alloc == -1) {
			/* First pass only: compute preallocation once. */
			pre_alloc = 0;
			if (type == ATTR_DATA && !name_len &&
			    sbi->options->prealloc) {
				pre_alloc = bytes_to_cluster(
					sbi, get_pre_allocated(
						     new_size)) -
					    new_alen;
			}

			/* Get the last LCN to allocate from. */
			if (old_alen &&
			    !run_lookup_entry(run, vcn, &lcn, NULL, NULL)) {
				lcn = SPARSE_LCN;
			}

			if (lcn == SPARSE_LCN)
				lcn = 0;
			else if (lcn)
				lcn += 1;

			free = wnd_zeroes(&sbi->used.bitmap);
			if (to_allocate > free) {
				err = -ENOSPC;
				goto out;
			}

			/* Drop preallocation rather than fail the resize. */
			if (pre_alloc && to_allocate + pre_alloc > free)
				pre_alloc = 0;
		}

		vcn = old_alen;

		if (is_ext) {
			/* Sparse/compressed: just extend with a hole. */
			if (!run_add_entry(run, vcn, SPARSE_LCN, to_allocate,
					   false)) {
				err = -ENOMEM;
				goto out;
			}
			alen = to_allocate;
		} else {
			/* ~3 bytes per fragment. */
			err = attr_allocate_clusters(
				sbi, run, run_da, vcn, lcn, to_allocate,
				&pre_alloc,
				is_mft ? ALLOCATE_MFT : ALLOCATE_DEF, &alen,
				is_mft ? 0 :
					 (sbi->record_size -
					  le32_to_cpu(rec->used) + 8) /
						 3 +
					 1,
				NULL, NULL);
			if (err)
				goto out;
		}

		done += alen;
		vcn += alen;
		if (to_allocate > alen)
			to_allocate -= alen;
		else
			to_allocate = 0;

		/* Store what was allocated so far into the MFT record. */
pack_runs:
		err = mi_pack_runs(mi, attr, run, vcn - svcn);
		if (err)
			goto undo_1;

		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
		new_alloc_tmp = (u64)next_svcn << cluster_bits;
		attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);
		mi_b->dirty = dirty = true;

		if (next_svcn >= vcn && !to_allocate) {
			/* Normal way. Update attribute and exit. */
			attr_b->nres.data_size = cpu_to_le64(new_size);
			goto ok;
		}

		/* At least two MFT to avoid recursive loop. */
		if (is_mft && next_svcn == vcn &&
		    ((u64)done << sbi->cluster_bits) >= 2 * sbi->record_size) {
			new_size = new_alloc_tmp;
			attr_b->nres.data_size = attr_b->nres.alloc_size;
			goto ok;
		}

		if (le32_to_cpu(rec->used) < sbi->record_size) {
			/* Current record still has room: keep growing here. */
			old_alen = next_svcn;
			evcn = old_alen - 1;
			goto add_alloc_in_same_attr_seg;
		}

		attr_b->nres.data_size = attr_b->nres.alloc_size;
		if (new_alloc_tmp < old_valid)
			attr_b->nres.valid_size = attr_b->nres.data_size;

		if (type == ATTR_LIST) {
			err = ni_expand_list(ni);
			if (err)
				goto undo_2;
			if (next_svcn < vcn)
				goto pack_runs;

			/* Layout of records is changed. */
			goto again;
		}

		if (!ni->attr_list.size) {
			err = ni_create_attr_list(ni);
			/* In case of error layout of records is not changed. */
			if (err)
				goto undo_2;
			/* Layout of records is changed. */
		}

		if (next_svcn >= vcn) {
			/* This is MFT data, repeat. */
			goto again;
		}

		/* Insert new attribute segment. */
		err = ni_insert_nonresident(ni, type, name, name_len, run,
					    next_svcn, vcn - next_svcn,
					    attr_b->flags, &attr, &mi, NULL);

		/*
		 * Layout of records maybe changed.
		 * Find base attribute to update.
		 */
		le_b = NULL;
		attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len,
				      NULL, &mi_b);
		if (!attr_b) {
			err = -EINVAL;
			goto bad_inode;
		}

		if (err) {
			/* ni_insert_nonresident failed. */
			attr = NULL;
			goto undo_2;
		}

		/* keep runs for $MFT::$ATTR_DATA and $MFT::$ATTR_BITMAP. */
		if (ni->mi.rno != MFT_REC_MFT)
			run_truncate_head(run, evcn + 1);

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn = le64_to_cpu(attr->nres.evcn);

		/*
		 * Attribute is in consistency state.
		 * Save this point to restore to if next steps fail.
		 */
		old_valid = old_size = old_alloc = (u64)vcn << cluster_bits;
		attr_b->nres.valid_size = attr_b->nres.data_size =
			attr_b->nres.alloc_size = cpu_to_le64(old_size);
		mi_b->dirty = dirty = true;
		goto again_1;
	}

	if (new_size != old_size ||
	    (new_alloc != old_alloc && !keep_prealloc)) {
		/*
		 * Truncate clusters. In simple case we have to:
		 * - update packed run in 'mi'
		 * - update attr->nres.evcn
		 * - update attr_b->nres.data_size/attr_b->nres.alloc_size
		 * - mark and trim clusters as free (vcn, lcn, len)
		 */
		CLST dlen = 0;

		vcn = max(svcn, new_alen);
		new_alloc_tmp = (u64)vcn << cluster_bits;

		if (vcn > svcn) {
			err = mi_pack_runs(mi, attr, run, vcn - svcn);
			if (err)
				goto out;
		} else if (le && le->vcn) {
			u16 le_sz = le16_to_cpu(le->size);

			/*
			 * NOTE: List entries for one attribute are always
			 * the same size. We deal with last entry (vcn==0)
			 * and it is not first in entries array
			 * (list entry for std attribute always first).
			 * So it is safe to step back.
			 */
			mi_remove_attr(NULL, mi, attr);

			if (!al_remove_le(ni, le)) {
				err = -EINVAL;
				goto bad_inode;
			}

			le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
		} else {
			attr->nres.evcn = cpu_to_le64((u64)vcn - 1);
			mi->dirty = true;
		}

		attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);

		if (vcn == new_alen) {
			attr_b->nres.data_size = cpu_to_le64(new_size);
			if (new_size < old_valid)
				attr_b->nres.valid_size =
					attr_b->nres.data_size;
		} else {
			if (new_alloc_tmp <=
			    le64_to_cpu(attr_b->nres.data_size))
				attr_b->nres.data_size =
					attr_b->nres.alloc_size;
			if (new_alloc_tmp <
			    le64_to_cpu(attr_b->nres.valid_size))
				attr_b->nres.valid_size =
					attr_b->nres.alloc_size;
		}
		mi_b->dirty = dirty = true;

		err = run_deallocate_ex(sbi, run, vcn, evcn - vcn + 1, &dlen,
					true, run_da);
		if (err)
			goto out;

		if (is_ext) {
			/* dlen - really deallocated clusters. */
			le64_sub_cpu(&attr_b->nres.total_size,
				     (u64)dlen << cluster_bits);
		}

		run_truncate(run, vcn);

		if (new_alloc_tmp <= new_alloc)
			goto ok;

		/* More segments to truncate: walk to the previous one. */
		old_size = new_alloc_tmp;
		vcn = svcn - 1;

		if (le == le_b) {
			attr = attr_b;
			mi = mi_b;
			evcn = svcn - 1;
			svcn = 0;
			goto next_le;
		}

		if (le->type != type || le->name_len != name_len ||
		    memcmp(le_name(le), name, name_len * sizeof(short))) {
			err = -EINVAL;
			goto bad_inode;
		}

		err = ni_load_mi(ni, le, &mi);
		if (err)
			goto out;

		attr = mi_find_attr(ni, mi, NULL, type, name, name_len,
				    &le->id);
		if (!attr) {
			err = -EINVAL;
			goto bad_inode;
		}
		goto next_le_1;
	}

ok:
	if (new_valid) {
		__le64 valid = cpu_to_le64(min(*new_valid, new_size));

		if (attr_b->nres.valid_size != valid) {
			attr_b->nres.valid_size = valid;
			mi_b->dirty = true;
		}
	}

ok1:
	if (ret)
		*ret = attr_b;

	if (((type == ATTR_DATA && !name_len) ||
	     (type == ATTR_ALLOC && name == I30_NAME))) {
		/* Update inode_set_bytes. */
		if (attr_b->non_res &&
		    inode_get_bytes(&ni->vfs_inode) != new_alloc) {
			inode_set_bytes(&ni->vfs_inode, new_alloc);
			dirty = true;
		}

		i_size_write(&ni->vfs_inode, new_size);

		/* Don't forget to update duplicate information in parent. */
		if (dirty) {
			ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
			mark_inode_dirty(&ni->vfs_inode);
		}
	}

	return 0;

	/* Roll sizes back to the last consistent snapshot. */
undo_2:
	vcn -= alen;
	attr_b->nres.data_size = cpu_to_le64(old_size);
	attr_b->nres.valid_size = cpu_to_le64(old_valid);
	attr_b->nres.alloc_size = cpu_to_le64(old_alloc);

	/* Restore 'attr' and 'mi'. */
	if (attr)
		goto restore_run;

	if (le64_to_cpu(attr_b->nres.svcn) <= svcn &&
	    svcn <= le64_to_cpu(attr_b->nres.evcn)) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else if (!le_b) {
		err = -EINVAL;
		goto bad_inode;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, type, name, name_len,
				    &svcn, &mi);
		if (!attr)
			goto bad_inode;
	}

restore_run:
	if (mi_pack_runs(mi, attr, run, evcn - svcn + 1))
		is_bad = true;

	/* Free the clusters obtained in the failed extension step. */
undo_1:
	run_deallocate_ex(sbi, run, vcn, alen, NULL, false, run_da);

	run_truncate(run, vcn);
out:
	if (is_bad) {
bad_inode:
		_ntfs_bad_inode(&ni->vfs_inode);
	}
	return err;
}
941
942 /*
943 * attr_data_get_block - Returns 'lcn' and 'len' for given 'vcn'.
944 *
945 * @new == NULL means just to get current mapping for 'vcn'
946 * @new != NULL means allocate real cluster if 'vcn' maps to hole
947 * @zero - zeroout new allocated clusters
948 *
949 * NOTE:
950 * - @new != NULL is called only for sparsed or compressed attributes.
951 * - new allocated clusters are zeroed via blkdev_issue_zeroout.
952 */
int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
			CLST *len, bool *new, bool zero, void **res, bool no_da)
{
	int err;

	if (new)
		*new = false;
	if (res)
		*res = NULL;

	/* First consult the cached runs under the shared lock. */
	down_read(&ni->file.run_lock);
	if (!no_da && run_lookup_entry(&ni->file.run_da, vcn, lcn, len, NULL)) {
		/* Hit in the delay-allocated run. */
		*lcn = DELALLOC_LCN;
	} else if (!run_lookup_entry(&ni->file.run, vcn, lcn, len, NULL)) {
		/* Not cached at all. */
		*len = 0;
	}
	up_read(&ni->file.run_lock);

	/* A cached mapping that needs no allocation: done. */
	if (*len && (*lcn != SPARSE_LCN || !new))
		return 0;

	/* Cache miss, or a hole that must be filled: take the write lock. */
	ni_lock(ni);
	down_write(&ni->file.run_lock);

	err = attr_data_get_block_locked(ni, vcn, clen, lcn, len, new, zero,
					 res, no_da);

	up_write(&ni->file.run_lock);
	ni_unlock(ni);

	return err;
}
990
991 /*
992 * attr_data_get_block_locked - Helper for attr_data_get_block.
993 */
attr_data_get_block_locked(struct ntfs_inode * ni,CLST vcn,CLST clen,CLST * lcn,CLST * len,bool * new,bool zero,void ** res,bool no_da)994 int attr_data_get_block_locked(struct ntfs_inode *ni, CLST vcn, CLST clen,
995 CLST *lcn, CLST *len, bool *new, bool zero,
996 void **res, bool no_da)
997 {
998 int err = 0;
999 struct ntfs_sb_info *sbi = ni->mi.sbi;
1000 struct runs_tree *run = &ni->file.run;
1001 struct runs_tree *run_da = &ni->file.run_da;
1002 bool da = sbi->options->delalloc && !no_da;
1003 u8 cluster_bits;
1004 struct ATTRIB *attr, *attr_b;
1005 struct ATTR_LIST_ENTRY *le, *le_b;
1006 struct mft_inode *mi, *mi_b;
1007 CLST hint, svcn, to_alloc, evcn1, next_svcn, asize, end, vcn0;
1008 CLST alloc, evcn;
1009 unsigned fr;
1010 u64 total_size, total_size0;
1011 int step;
1012
1013 again:
1014 if (da && run_lookup_entry(run_da, vcn, lcn, len, NULL)) {
1015 /* The requested vcn is delay allocated. */
1016 *lcn = DELALLOC_LCN;
1017 } else if (run_lookup_entry(run, vcn, lcn, len, NULL)) {
1018 /* The requested vcn is known in current run. */
1019 } else {
1020 *len = 0;
1021 }
1022
1023 if (*len) {
1024 if (*lcn != SPARSE_LCN || !new)
1025 goto out; /* normal way without allocation. */
1026 if (clen > *len)
1027 clen = *len;
1028 }
1029
1030 cluster_bits = sbi->cluster_bits;
1031 step = 0;
1032
1033 le_b = NULL;
1034 attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
1035 if (!attr_b) {
1036 err = -ENOENT;
1037 goto out;
1038 }
1039
1040 if (!attr_b->non_res) {
1041 u32 data_size = le32_to_cpu(attr_b->res.data_size);
1042 *lcn = RESIDENT_LCN;
1043 *len = data_size;
1044 if (res && data_size) {
1045 *res = kmemdup(resident_data(attr_b), data_size,
1046 GFP_KERNEL);
1047 if (!*res)
1048 err = -ENOMEM;
1049 }
1050 goto out;
1051 }
1052
1053 asize = le64_to_cpu(attr_b->nres.alloc_size) >> cluster_bits;
1054 if (vcn >= asize) {
1055 if (new) {
1056 err = -EINVAL;
1057 } else {
1058 *len = 1;
1059 *lcn = EOF_LCN;
1060 }
1061 goto out;
1062 }
1063
1064 svcn = le64_to_cpu(attr_b->nres.svcn);
1065 evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
1066
1067 attr = attr_b;
1068 le = le_b;
1069 mi = mi_b;
1070
1071 if (le_b && (vcn < svcn || evcn1 <= vcn)) {
1072 attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
1073 &mi);
1074 if (!attr) {
1075 err = -EINVAL;
1076 goto out;
1077 }
1078 svcn = le64_to_cpu(attr->nres.svcn);
1079 evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
1080 }
1081
1082 /* Load in cache actual information. */
1083 err = attr_load_runs(attr, ni, run, NULL);
1084 if (err)
1085 goto out;
1086
1087 /* Check for compressed frame. */
1088 err = attr_is_frame_compressed(ni, attr_b, vcn >> NTFS_LZNT_CUNIT,
1089 &hint, run);
1090 if (err)
1091 goto out;
1092
1093 if (hint) {
1094 /* if frame is compressed - don't touch it. */
1095 *lcn = COMPRESSED_LCN;
1096 /* length to the end of frame. */
1097 *len = NTFS_LZNT_CLUSTERS - (vcn & (NTFS_LZNT_CLUSTERS - 1));
1098 err = 0;
1099 goto out;
1100 }
1101
1102 if (!*len) {
1103 if (run_lookup_entry(run, vcn, lcn, len, NULL)) {
1104 if (*lcn != SPARSE_LCN || !new)
1105 goto ok; /* Slow normal way without allocation. */
1106
1107 if (clen > *len)
1108 clen = *len;
1109 } else if (!new) {
1110 /* Here we may return -ENOENT.
1111 * In any case caller gets zero length. */
1112 goto ok;
1113 }
1114 }
1115
1116 if (!is_attr_ext(attr_b)) {
1117 /* The code below only for sparsed or compressed attributes. */
1118 err = -EINVAL;
1119 goto out;
1120 }
1121
1122 vcn0 = vcn;
1123 to_alloc = clen;
1124 fr = (sbi->record_size - le32_to_cpu(mi->mrec->used) + 8) / 3 + 1;
1125 /* Allocate frame aligned clusters.
1126 * ntfs.sys usually uses 16 clusters per frame for sparsed or compressed.
1127 * ntfs3 uses 1 cluster per frame for new created sparsed files. */
1128 if (attr_b->nres.c_unit) {
1129 CLST clst_per_frame = 1u << attr_b->nres.c_unit;
1130 CLST cmask = ~(clst_per_frame - 1);
1131
1132 /* Get frame aligned vcn and to_alloc. */
1133 vcn = vcn0 & cmask;
1134 to_alloc = ((vcn0 + clen + clst_per_frame - 1) & cmask) - vcn;
1135 if (fr < clst_per_frame)
1136 fr = clst_per_frame;
1137 if (vcn != vcn0)
1138 zero = true;
1139
1140 /* Check if 'vcn' and 'vcn0' in different attribute segments. */
1141 if (vcn < svcn || evcn1 <= vcn) {
1142 struct ATTRIB *attr2;
1143 /* Load runs for truncated vcn. */
1144 attr2 = ni_find_attr(ni, attr_b, &le_b, ATTR_DATA, NULL,
1145 0, &vcn, &mi);
1146 if (!attr2) {
1147 err = -EINVAL;
1148 goto out;
1149 }
1150 evcn1 = le64_to_cpu(attr2->nres.evcn) + 1;
1151 err = attr_load_runs(attr2, ni, run, NULL);
1152 if (err)
1153 goto out;
1154 }
1155 da = false; /* no delalloc for compressed file. */
1156 }
1157
1158 if (vcn + to_alloc > asize)
1159 to_alloc = asize - vcn;
1160
1161 if (da) {
1162 CLST rlen1, rlen2;
1163 if (!ntfs_check_free_space(sbi, to_alloc, 0, true)) {
1164 err = ni_allocate_da_blocks_locked(ni);
1165 if (err)
1166 goto out;
1167 /* Layout of records may be changed. Start again without 'da'. */
1168 da = false;
1169 goto again;
1170 }
1171
1172 /* run_add_entry consolidates existed ranges. */
1173 rlen1 = run_len(run_da);
1174 if (!run_add_entry(run_da, vcn, SPARSE_LCN, to_alloc, false)) {
1175 err = -ENOMEM;
1176 goto out;
1177 }
1178 rlen2 = run_len(run_da);
1179
1180 /* new added delay clusters = rlen2 - rlen1. */
1181 ntfs_add_da(sbi, rlen2 - rlen1);
1182 *len = to_alloc;
1183 *lcn = DELALLOC_LCN;
1184 goto ok;
1185 }
1186
1187 /* Get the last LCN to allocate from. */
1188 hint = 0;
1189
1190 if (vcn > evcn1) {
1191 if (!run_add_entry(run, evcn1, SPARSE_LCN, vcn - evcn1,
1192 false)) {
1193 err = -ENOMEM;
1194 goto out;
1195 }
1196 } else if (vcn && !run_lookup_entry(run, vcn - 1, &hint, NULL, NULL)) {
1197 hint = -1;
1198 }
1199
1200 /* Allocate and zeroout new clusters. */
1201 err = attr_allocate_clusters(sbi, run, run_da, vcn, hint + 1, to_alloc,
1202 NULL,
1203 zero ? ALLOCATE_ZERO : ALLOCATE_ONE_FR,
1204 len, fr, lcn, len);
1205 if (err)
1206 goto out;
1207 *new = true;
1208 step = 1;
1209
1210 end = vcn + *len;
1211 /* Save 'total_size0' to restore if error. */
1212 total_size0 = le64_to_cpu(attr_b->nres.total_size);
1213 total_size = total_size0 + ((u64)*len << cluster_bits);
1214
1215 if (vcn != vcn0) {
1216 if (!run_lookup_entry(run, vcn0, lcn, len, NULL)) {
1217 err = -EINVAL;
1218 goto out;
1219 }
1220 if (*lcn == SPARSE_LCN) {
1221 /* Internal error. Should not happened. */
1222 WARN_ON(1);
1223 err = -EINVAL;
1224 goto out;
1225 }
1226 /* Check case when vcn0 + len overlaps new allocated clusters. */
1227 if (vcn0 + *len > end)
1228 *len = end - vcn0;
1229 }
1230
1231 repack:
1232 err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
1233 if (err)
1234 goto out;
1235
1236 attr_b->nres.total_size = cpu_to_le64(total_size);
1237 inode_set_bytes(&ni->vfs_inode, total_size);
1238 ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
1239
1240 mi_b->dirty = true;
1241 mark_inode_dirty(&ni->vfs_inode);
1242
1243 /* Stored [vcn : next_svcn) from [vcn : end). */
1244 next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1245
1246 if (end <= evcn1) {
1247 if (next_svcn == evcn1) {
1248 /* Normal way. Update attribute and exit. */
1249 goto ok;
1250 }
1251 /* Add new segment [next_svcn : evcn1 - next_svcn). */
1252 if (!ni->attr_list.size) {
1253 err = ni_create_attr_list(ni);
1254 if (err)
1255 goto undo1;
1256 /* Layout of records is changed. */
1257 le_b = NULL;
1258 attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
1259 0, NULL, &mi_b);
1260 if (!attr_b) {
1261 err = -ENOENT;
1262 goto out;
1263 }
1264
1265 attr = attr_b;
1266 le = le_b;
1267 mi = mi_b;
1268 goto repack;
1269 }
1270 }
1271
1272 /*
1273 * The code below may require additional cluster (to extend attribute list)
1274 * and / or one MFT record
1275 * It is too complex to undo operations if -ENOSPC occurs deep inside
1276 * in 'ni_insert_nonresident'.
1277 * Return in advance -ENOSPC here if there are no free cluster and no free MFT.
1278 */
1279 if (!ntfs_check_free_space(sbi, 1, 1, false)) {
1280 /* Undo step 1. */
1281 err = -ENOSPC;
1282 goto undo1;
1283 }
1284
1285 step = 2;
1286 svcn = evcn1;
1287
1288 /* Estimate next attribute. */
1289 attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);
1290
1291 if (!attr) {
1292 /* Insert new attribute segment. */
1293 goto ins_ext;
1294 }
1295
1296 /* Try to update existed attribute segment. */
1297 alloc = bytes_to_cluster(sbi, le64_to_cpu(attr_b->nres.alloc_size));
1298 evcn = le64_to_cpu(attr->nres.evcn);
1299
1300 if (end < next_svcn)
1301 end = next_svcn;
1302 while (end > evcn) {
1303 /* Remove segment [svcn : evcn). */
1304 mi_remove_attr(NULL, mi, attr);
1305
1306 if (!al_remove_le(ni, le)) {
1307 err = -EINVAL;
1308 goto out;
1309 }
1310
1311 if (evcn + 1 >= alloc) {
1312 /* Last attribute segment. */
1313 evcn1 = evcn + 1;
1314 goto ins_ext;
1315 }
1316
1317 if (ni_load_mi(ni, le, &mi)) {
1318 attr = NULL;
1319 goto out;
1320 }
1321
1322 attr = mi_find_attr(ni, mi, NULL, ATTR_DATA, NULL, 0, &le->id);
1323 if (!attr) {
1324 err = -EINVAL;
1325 goto out;
1326 }
1327 svcn = le64_to_cpu(attr->nres.svcn);
1328 evcn = le64_to_cpu(attr->nres.evcn);
1329 }
1330
1331 if (end < svcn)
1332 end = svcn;
1333
1334 err = attr_load_runs(attr, ni, run, &end);
1335 if (err)
1336 goto out;
1337
1338 evcn1 = evcn + 1;
1339 attr->nres.svcn = cpu_to_le64(next_svcn);
1340 err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
1341 if (err)
1342 goto out;
1343
1344 le->vcn = cpu_to_le64(next_svcn);
1345 ni->attr_list.dirty = true;
1346 mi->dirty = true;
1347 next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1348
1349 ins_ext:
1350 if (evcn1 > next_svcn) {
1351 err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
1352 next_svcn, evcn1 - next_svcn,
1353 attr_b->flags, &attr, &mi, NULL);
1354 if (err)
1355 goto out;
1356 }
1357 ok:
1358 run_truncate_around(run, vcn);
1359 out:
1360 if (err && step > 1) {
1361 /* Too complex to restore. */
1362 _ntfs_bad_inode(&ni->vfs_inode);
1363 }
1364
1365 return err;
1366
1367 undo1:
1368 /* Undo step1. */
1369 attr_b->nres.total_size = cpu_to_le64(total_size0);
1370 inode_set_bytes(&ni->vfs_inode, total_size0);
1371
1372 if (run_deallocate_ex(sbi, run, vcn, *len, NULL, false, run_da) ||
1373 !run_add_entry(run, vcn, SPARSE_LCN, *len, false) ||
1374 mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn)) {
1375 _ntfs_bad_inode(&ni->vfs_inode);
1376 }
1377 goto out;
1378 }
1379
attr_data_write_resident(struct ntfs_inode * ni,struct folio * folio)1380 int attr_data_write_resident(struct ntfs_inode *ni, struct folio *folio)
1381 {
1382 u64 vbo;
1383 struct mft_inode *mi;
1384 struct ATTRIB *attr;
1385 u32 data_size;
1386
1387 attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, &mi);
1388 if (!attr)
1389 return -EINVAL;
1390
1391 if (attr->non_res) {
1392 /* Return special error code to check this case. */
1393 return E_NTFS_NONRESIDENT;
1394 }
1395
1396 vbo = folio_pos(folio);
1397 data_size = le32_to_cpu(attr->res.data_size);
1398 if (vbo < data_size) {
1399 char *data = resident_data(attr);
1400 size_t len = min(data_size - vbo, folio_size(folio));
1401
1402 memcpy_from_folio(data + vbo, folio, 0, len);
1403 mi->dirty = true;
1404 }
1405 ni->i_valid = data_size;
1406
1407 return 0;
1408 }
1409
1410 /*
1411 * attr_load_runs_vcn - Load runs with VCN.
1412 */
/*
 * attr_load_runs_vcn - Load runs of the attribute segment that contains @vcn.
 *
 * Looks up the segment for @vcn, validates its [svcn, evcn] bounds and the
 * run offset, then unpacks the mapping pairs into @run.
 */
int attr_load_runs_vcn(struct ntfs_inode *ni, enum ATTR_TYPE type,
		       const __le16 *name, u8 name_len, struct runs_tree *run,
		       CLST vcn)
{
	int err;
	struct ATTRIB *attr;
	CLST svcn, evcn;
	u16 run_off;
	u32 asize;

	if (!ni) {
		/* Is record corrupted? */
		return -ENOENT;
	}

	attr = ni_find_attr(ni, NULL, NULL, type, name, name_len, &vcn, NULL);
	if (!attr) {
		/* Is record corrupted? */
		return -ENOENT;
	}

	svcn = le64_to_cpu(attr->nres.svcn);
	evcn = le64_to_cpu(attr->nres.evcn);

	if (vcn < svcn || evcn < vcn) {
		/* Is record corrupted? */
		return -EINVAL;
	}

	/* Mapping pairs must live inside the attribute. */
	run_off = le16_to_cpu(attr->nres.run_off);
	asize = le32_to_cpu(attr->size);
	if (run_off > asize)
		return -EINVAL;

	err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn, svcn,
			    Add2Ptr(attr, run_off), asize - run_off);
	return err < 0 ? err : 0;
}
1452
1453 /*
1454 * attr_load_runs_range - Load runs for given range [from to).
1455 */
/*
 * attr_load_runs_range - Load runs for given byte range [from, to).
 *
 * Walks the range cluster by cluster; for every unmapped vcn the owning
 * attribute segment is loaded and the lookup retried exactly once.
 */
int attr_load_runs_range(struct ntfs_inode *ni, enum ATTR_TYPE type,
			 const __le16 *name, u8 name_len, struct runs_tree *run,
			 u64 from, u64 to)
{
	u8 cluster_bits = ni->mi.sbi->cluster_bits;
	CLST last = (to - 1) >> cluster_bits;
	CLST cur = from >> cluster_bits;
	CLST lcn, clen;
	int loaded = 0; /* Non-zero while re-checking a just loaded vcn. */
	int err = 0;

	while (cur <= last) {
		if (run_lookup_entry(run, cur, &lcn, &clen, NULL)) {
			/* Mapped: advance to the next fragment. */
			loaded = 0;
			cur += clen;
			continue;
		}

		if (loaded) {
			/* Load succeeded but vcn is still unmapped. */
			err = -EINVAL;
			break;
		}

		err = attr_load_runs_vcn(ni, type, name, name_len, run, cur);
		if (err)
			break;

		/* Retry the same vcn: the lookup must succeed now. */
		loaded = 1;
	}

	return err;
}
1487
1488 #ifdef CONFIG_NTFS3_LZX_XPRESS
1489 /*
1490 * attr_wof_frame_info
1491 *
1492 * Read header of Xpress/LZX file to get info about frame.
1493 */
/*
 * attr_wof_frame_info
 *
 * Read header of Xpress/LZX file to get info about frame.
 *
 * The WOF stream starts with an array of per-frame end offsets (32 bit if
 * file size < 4G, else 64 bit). For @frame, reads the previous and current
 * offsets to compute where the compressed frame starts (*vbo_data) and its
 * on-disk length (*ondisk_size).
 *
 * Fix: 'err' was left uninitialized on the success path of the
 * non-resident branch (when the cached offsets folio already held the
 * required page, or when both offsets were read from one page); set
 * err = 0 once the outputs are stored.
 */
int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
			struct runs_tree *run, u64 frame, u64 frames,
			u8 frame_bits, u32 *ondisk_size, u64 *vbo_data)
{
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	u64 vbo[2], off[2], wof_size;
	u32 voff;
	u8 bytes_per_off;
	char *addr;
	struct folio *folio;
	int i, err;
	__le32 *off32;
	__le64 *off64;

	if (ni->vfs_inode.i_size < 0x100000000ull) {
		/* File starts with array of 32 bit offsets. */
		bytes_per_off = sizeof(__le32);
		vbo[1] = frame << 2;
		*vbo_data = frames << 2;
	} else {
		/* File starts with array of 64 bit offsets. */
		bytes_per_off = sizeof(__le64);
		vbo[1] = frame << 3;
		*vbo_data = frames << 3;
	}

	/*
	 * Read 4/8 bytes at [vbo - 4(8)] == offset where compressed frame starts.
	 * Read 4/8 bytes at [vbo] == offset where compressed frame ends.
	 */
	if (!attr->non_res) {
		if (vbo[1] + bytes_per_off > le32_to_cpu(attr->res.data_size)) {
			_ntfs_bad_inode(&ni->vfs_inode);
			return -EINVAL;
		}
		addr = resident_data(attr);

		if (bytes_per_off == sizeof(__le32)) {
			off32 = Add2Ptr(addr, vbo[1]);
			off[0] = vbo[1] ? le32_to_cpu(off32[-1]) : 0;
			off[1] = le32_to_cpu(off32[0]);
		} else {
			off64 = Add2Ptr(addr, vbo[1]);
			off[0] = vbo[1] ? le64_to_cpu(off64[-1]) : 0;
			off[1] = le64_to_cpu(off64[0]);
		}

		*vbo_data += off[0];
		*ondisk_size = off[1] - off[0];
		return 0;
	}

	wof_size = le64_to_cpu(attr->nres.data_size);
	down_write(&ni->file.run_lock);
	/* Per-inode one-page cache of the offsets array. */
	folio = ni->file.offs_folio;
	if (!folio) {
		folio = folio_alloc(GFP_KERNEL, 0);
		if (!folio) {
			err = -ENOMEM;
			goto out;
		}
		folio->index = -1;
		ni->file.offs_folio = folio;
	}
	folio_lock(folio);
	addr = folio_address(folio);

	if (vbo[1]) {
		voff = vbo[1] & (PAGE_SIZE - 1);
		vbo[0] = vbo[1] - bytes_per_off;
		i = 0;
	} else {
		/* First frame: start offset is implicitly 0. */
		voff = 0;
		vbo[0] = 0;
		off[0] = 0;
		i = 1;
	}

	do {
		pgoff_t index = vbo[i] >> PAGE_SHIFT;

		if (index != folio->index) {
			/* Cache miss: read the page that holds vbo[i]. */
			u64 from = vbo[i] & ~(u64)(PAGE_SIZE - 1);
			u64 to = min(from + PAGE_SIZE, wof_size);

			err = attr_load_runs_range(ni, ATTR_DATA, WOF_NAME,
						   ARRAY_SIZE(WOF_NAME), run,
						   from, to);
			if (err)
				goto out1;

			err = ntfs_read_run(sbi, run, addr, from, to - from);
			if (err) {
				/* Invalidate the cache on a failed read. */
				folio->index = -1;
				goto out1;
			}
			folio->index = index;
		}

		if (i) {
			/* Second pass: read the frame end offset. */
			if (bytes_per_off == sizeof(__le32)) {
				off32 = Add2Ptr(addr, voff);
				off[1] = le32_to_cpu(*off32);
			} else {
				off64 = Add2Ptr(addr, voff);
				off[1] = le64_to_cpu(*off64);
			}
		} else if (!voff) {
			/* Start offset is the last entry of the previous page. */
			if (bytes_per_off == sizeof(__le32)) {
				off32 = Add2Ptr(addr, PAGE_SIZE - sizeof(u32));
				off[0] = le32_to_cpu(*off32);
			} else {
				off64 = Add2Ptr(addr, PAGE_SIZE - sizeof(u64));
				off[0] = le64_to_cpu(*off64);
			}
		} else {
			/* Two values in one page. */
			if (bytes_per_off == sizeof(__le32)) {
				off32 = Add2Ptr(addr, voff);
				off[0] = le32_to_cpu(off32[-1]);
				off[1] = le32_to_cpu(off32[0]);
			} else {
				off64 = Add2Ptr(addr, voff);
				off[0] = le64_to_cpu(off64[-1]);
				off[1] = le64_to_cpu(off64[0]);
			}
			break;
		}
	} while (++i < 2);

	*vbo_data += off[0];
	*ondisk_size = off[1] - off[0];
	err = 0;

out1:
	folio_unlock(folio);
out:
	up_write(&ni->file.run_lock);
	return err;
}
1633 #endif
1634
1635 /*
1636 * attr_is_frame_compressed - Used to detect compressed frame.
1637 *
1638 * attr - base (primary) attribute segment.
1639 * run - run to use, usually == &ni->file.run.
1640 * Only base segments contains valid 'attr->nres.c_unit'
1641 */
/*
 * attr_is_frame_compressed - Used to detect compressed frame.
 *
 * attr - base (primary) attribute segment.
 * run - run to use, usually == &ni->file.run.
 * Only base segments contains valid 'attr->nres.c_unit'
 *
 * On success *clst_data is the count of data (non-sparse) clusters in the
 * frame: 0 for a fully sparse frame, the full frame size (clst_frame) for
 * an uncompressed frame, less than clst_frame for a compressed one.
 */
int attr_is_frame_compressed(struct ntfs_inode *ni, struct ATTRIB *attr,
			     CLST frame, CLST *clst_data, struct runs_tree *run)
{
	int err;
	u32 clst_frame;
	CLST clen, lcn, vcn, alen, slen, vcn_next;
	size_t idx;

	*clst_data = 0;

	/* Frames exist only in compressed, non-resident attributes. */
	if (!is_attr_compressed(attr))
		return 0;

	if (!attr->non_res)
		return 0;

	/* Clusters per frame and the first vcn of this frame. */
	clst_frame = 1u << attr->nres.c_unit;
	vcn = frame * clst_frame;

	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
		/* Fragment not cached yet: load runs and retry. */
		err = attr_load_runs_vcn(ni, attr->type, attr_name(attr),
					 attr->name_len, run, vcn);
		if (err)
			return err;

		if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
			return -EINVAL;
	}

	if (lcn == SPARSE_LCN) {
		/* Sparsed frame. */
		return 0;
	}

	if (clen >= clst_frame) {
		/*
		 * The frame is not compressed 'cause
		 * it does not contain any sparse clusters.
		 */
		*clst_data = clst_frame;
		return 0;
	}

	alen = bytes_to_cluster(ni->mi.sbi, le64_to_cpu(attr->nres.alloc_size));
	slen = 0;
	*clst_data = clen;

	/*
	 * The frame is compressed if *clst_data + slen >= clst_frame.
	 * Check next fragments.
	 */
	while ((vcn += clen) < alen) {
		vcn_next = vcn;

		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn_next != vcn) {
			/* Next fragment not cached: load runs and retry. */
			err = attr_load_runs_vcn(ni, attr->type,
						 attr_name(attr),
						 attr->name_len, run, vcn_next);
			if (err)
				return err;
			vcn = vcn_next;

			if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
				return -EINVAL;
		}

		if (lcn == SPARSE_LCN) {
			/* Accumulate trailing sparse clusters. */
			slen += clen;
		} else {
			if (slen) {
				/*
				 * Data_clusters + sparse_clusters =
				 * not enough for frame.
				 */
				return -EINVAL;
			}
			*clst_data += clen;
		}

		if (*clst_data + slen >= clst_frame) {
			if (!slen) {
				/*
				 * There is no sparsed clusters in this frame
				 * so it is not compressed.
				 */
				*clst_data = clst_frame;
			} else {
				/* Frame is compressed. */
			}
			break;
		}
	}

	return 0;
}
1738
1739 /*
1740 * attr_allocate_frame - Allocate/free clusters for @frame.
1741 *
1742 * Assumed: down_write(&ni->file.run_lock);
1743 */
/*
 * attr_allocate_frame - Allocate/free clusters for @frame.
 *
 * Resizes the on-disk allocation of one compression frame to hold
 * @compr_size bytes: frees trailing clusters (replacing them with a sparse
 * run) when shrinking, allocates new clusters when growing, then repacks
 * the affected attribute segments. Also applies @new_valid (clamped to
 * data_size) to the base attribute.
 *
 * Assumed: down_write(&ni->file.run_lock);
 *
 * Fix: the valid-size update wrote back the OLD value
 * (cpu_to_le64(valid_size)) making the 'new_valid != valid_size' branch a
 * no-op; it must store new_valid.
 */
int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
			u64 new_valid)
{
	int err = 0;
	struct runs_tree *run = &ni->file.run;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST svcn, evcn1, next_svcn, len;
	CLST vcn, end, clst_data;
	u64 total_size, valid_size, data_size;

	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
	if (!attr_b)
		return -ENOENT;

	/* Frames exist only for sparse/compressed attributes. */
	if (!is_attr_ext(attr_b))
		return -EINVAL;

	vcn = frame << NTFS_LZNT_CUNIT;
	total_size = le64_to_cpu(attr_b->nres.total_size);

	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
	data_size = le64_to_cpu(attr_b->nres.data_size);

	/* Find the attribute segment that contains vcn. */
	if (svcn <= vcn && vcn < evcn1) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else if (!le_b) {
		err = -EINVAL;
		goto out;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}
		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	err = attr_load_runs(attr, ni, run, NULL);
	if (err)
		goto out;

	/* clst_data = current data (non-sparse) clusters in the frame. */
	err = attr_is_frame_compressed(ni, attr_b, frame, &clst_data, run);
	if (err)
		goto out;

	total_size -= (u64)clst_data << sbi->cluster_bits;

	len = bytes_to_cluster(sbi, compr_size);

	if (len == clst_data)
		goto out;

	if (len < clst_data) {
		/* Shrink: free trailing clusters and mark them sparse. */
		err = run_deallocate_ex(sbi, run, vcn + len, clst_data - len,
					NULL, true, NULL);
		if (err)
			goto out;

		if (!run_add_entry(run, vcn + len, SPARSE_LCN, clst_data - len,
				   false)) {
			err = -ENOMEM;
			goto out;
		}
		end = vcn + clst_data;
		/* Run contains updated range [vcn + len : end). */
	} else {
		CLST alen, hint = 0;
		/* Get the last LCN to allocate from. */
		if (vcn + clst_data &&
		    !run_lookup_entry(run, vcn + clst_data - 1, &hint, NULL,
				      NULL)) {
			hint = -1;
		}

		err = attr_allocate_clusters(sbi, run, NULL, vcn + clst_data,
					     hint + 1, len - clst_data, NULL,
					     ALLOCATE_DEF, &alen, 0, NULL,
					     NULL);
		if (err)
			goto out;

		end = vcn + len;
		/* Run contains updated range [vcn + clst_data : end). */
	}

	total_size += (u64)len << sbi->cluster_bits;

repack:
	err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
	if (err)
		goto out;

	attr_b->nres.total_size = cpu_to_le64(total_size);
	inode_set_bytes(&ni->vfs_inode, total_size);
	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;

	mi_b->dirty = true;
	mark_inode_dirty(&ni->vfs_inode);

	/* Stored [vcn : next_svcn) from [vcn : end). */
	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;

	if (end <= evcn1) {
		if (next_svcn == evcn1) {
			/* Normal way. Update attribute and exit. */
			goto ok;
		}
		/* Add new segment [next_svcn : evcn1 - next_svcn). */
		if (!ni->attr_list.size) {
			err = ni_create_attr_list(ni);
			if (err)
				goto out;
			/* Layout of records is changed. */
			le_b = NULL;
			attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
					      0, NULL, &mi_b);
			if (!attr_b) {
				err = -ENOENT;
				goto out;
			}

			attr = attr_b;
			le = le_b;
			mi = mi_b;
			goto repack;
		}
	}

	svcn = evcn1;

	/* Estimate next attribute. */
	attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);

	if (attr) {
		CLST alloc = bytes_to_cluster(
			sbi, le64_to_cpu(attr_b->nres.alloc_size));
		CLST evcn = le64_to_cpu(attr->nres.evcn);

		if (end < next_svcn)
			end = next_svcn;
		/* Drop segments fully covered by the repacked range. */
		while (end > evcn) {
			/* Remove segment [svcn : evcn). */
			mi_remove_attr(NULL, mi, attr);

			if (!al_remove_le(ni, le)) {
				err = -EINVAL;
				goto out;
			}

			if (evcn + 1 >= alloc) {
				/* Last attribute segment. */
				evcn1 = evcn + 1;
				goto ins_ext;
			}

			if (ni_load_mi(ni, le, &mi)) {
				attr = NULL;
				goto out;
			}

			attr = mi_find_attr(ni, mi, NULL, ATTR_DATA, NULL, 0,
					    &le->id);
			if (!attr) {
				err = -EINVAL;
				goto out;
			}
			svcn = le64_to_cpu(attr->nres.svcn);
			evcn = le64_to_cpu(attr->nres.evcn);
		}

		if (end < svcn)
			end = svcn;

		err = attr_load_runs(attr, ni, run, &end);
		if (err)
			goto out;

		/* Shrink the following segment so it starts at next_svcn. */
		evcn1 = evcn + 1;
		attr->nres.svcn = cpu_to_le64(next_svcn);
		err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
		if (err)
			goto out;

		le->vcn = cpu_to_le64(next_svcn);
		ni->attr_list.dirty = true;
		mi->dirty = true;

		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
	}
ins_ext:
	if (evcn1 > next_svcn) {
		/* Insert new attribute segment for the remaining runs. */
		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
					    next_svcn, evcn1 - next_svcn,
					    attr_b->flags, &attr, &mi, NULL);
		if (err)
			goto out;
	}
ok:
	run_truncate_around(run, vcn);
out:
	if (attr_b) {
		if (new_valid > data_size)
			new_valid = data_size;

		valid_size = le64_to_cpu(attr_b->nres.valid_size);
		if (new_valid != valid_size) {
			/* Store the NEW valid size (was writing back the old one). */
			attr_b->nres.valid_size = cpu_to_le64(new_valid);
			mi_b->dirty = true;
		}
	}

	return err;
}
1967
1968 /*
1969 * attr_collapse_range - Collapse range in file.
1970 */
/*
 * attr_collapse_range - Collapse range in file.
 *
 * Removes byte range [vbo, vbo + bytes) from ATTR_DATA: deallocates the
 * covered clusters and shifts everything after the range down by 'bytes',
 * updating every affected attribute segment. Requires frame/cluster
 * aligned vbo and bytes. On any error the inode is marked bad.
 */
int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
{
	int err = 0;
	struct runs_tree *run = &ni->file.run;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST svcn, evcn1, len, dealloc, alen, done;
	CLST vcn, end;
	u64 valid_size, data_size, alloc_size, total_size;
	u32 mask;
	u64 i_size;
	__le16 a_flags;

	if (!bytes)
		return 0;

	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
	if (!attr_b)
		return -ENOENT;

	if (!attr_b->non_res) {
		/* Attribute is resident. Nothing to do? */
		return 0;
	}

	/* Alignment unit: compression frame for ext attrs, else cluster. */
	mask = is_attr_ext(attr_b) ?
		       ((sbi->cluster_size << attr_b->nres.c_unit) - 1) :
		       sbi->cluster_mask;
	if ((vbo | bytes) & mask) {
		/* Allow to collapse only cluster aligned ranges. */
		return -EINVAL;
	}

	/* i_size - size of file with delay allocated clusters. */
	i_size = ni->vfs_inode.i_size;

	if (vbo > i_size)
		return -EINVAL;

	down_write(&ni->file.run_lock);

	if (vbo + bytes >= i_size) {
		/* Range reaches EOF: collapsing == truncating at vbo. */
		valid_size = min(ni->i_valid, vbo);

		/* Simple truncate file at 'vbo'. */
		truncate_setsize(&ni->vfs_inode, vbo);
		err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, vbo,
				    &valid_size, true);

		if (!err && valid_size < ni->i_valid)
			ni->i_valid = valid_size;

		goto out;
	}

	vcn = vbo >> sbi->cluster_bits;
	len = bytes >> sbi->cluster_bits;
	end = vcn + len;
	dealloc = 0; /* Clusters actually freed (adjusts total_size). */
	done = 0; /* Clusters collapsed so far (see vcn1 below). */

	/*
	 * Check delayed clusters.
	 */
	if (ni->file.run_da.count) {
		struct runs_tree *run_da = &ni->file.run_da;
		if (run_is_mapped_full(run_da, vcn, end - 1)) {
			/*
			 * The requested range is full in delayed clusters.
			 */
			err = attr_set_size_ex(ni, ATTR_DATA, NULL, 0, run,
					       i_size - bytes, NULL, false,
					       NULL, true);
			goto out;
		}

		/* Collapse request crosses real and delayed clusters. */
		err = ni_allocate_da_blocks_locked(ni);
		if (err)
			goto out;

		/* Layout of records maybe changed. */
		le_b = NULL;
		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
				      &mi_b);
		if (!attr_b || !attr_b->non_res) {
			err = -ENOENT;
			goto out;
		}
	}

	data_size = le64_to_cpu(attr_b->nres.data_size);
	alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
	total_size = is_attr_ext(attr_b) ?
			     le64_to_cpu(attr_b->nres.total_size) :
			     alloc_size;
	alen = alloc_size >> sbi->cluster_bits;
	a_flags = attr_b->flags;
	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;

	if (svcn <= vcn && vcn < evcn1) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
		goto check_seg;
	}

	if (!le_b) {
		err = -EINVAL;
		goto out;
	}

	le = le_b;
	attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn, &mi);
	if (!attr) {
		err = -EINVAL;
		goto out;
	}

	/*
	 * Enumerate all attribute segments and collapse.
	 */
	for (;;) {
		CLST vcn1, eat, next_svcn;

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;

check_seg:
		if (svcn >= end) {
			/* Segment lies entirely after range: Shift VCN- */
			attr->nres.svcn = cpu_to_le64(svcn - len);
			attr->nres.evcn = cpu_to_le64(evcn1 - 1 - len);
			if (le) {
				le->vcn = attr->nres.svcn;
				ni->attr_list.dirty = true;
			}
			mi->dirty = true;
			goto next_attr;
		}

		run_truncate(run, 0);
		err = attr_load_runs(attr, ni, run, &svcn);
		if (err)
			goto out;

		vcn1 = vcn + done; /* original vcn in attr/run. */
		eat = min(end, evcn1) - vcn1;

		/* Free clusters of the eaten part of this segment. */
		err = run_deallocate_ex(sbi, run, vcn1, eat, &dealloc, true,
					NULL);
		if (err)
			goto out;

		if (svcn + eat < evcn1) {
			/* Collapse a part of this attribute segment. */
			if (!run_collapse_range(run, vcn1, eat, done)) {
				err = -ENOMEM;
				goto out;
			}

			if (svcn >= vcn) {
				/* Shift VCN */
				attr->nres.svcn = cpu_to_le64(vcn);
				if (le && attr->nres.svcn != le->vcn) {
					le->vcn = attr->nres.svcn;
					ni->attr_list.dirty = true;
				}
			}

			err = mi_pack_runs(mi, attr, run, evcn1 - svcn - eat);
			if (err)
				goto out;

			next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
			if (next_svcn + eat + done < evcn1) {
				/* Not all runs fit: insert extra segment. */
				err = ni_insert_nonresident(
					ni, ATTR_DATA, NULL, 0, run, next_svcn,
					evcn1 - eat - next_svcn, a_flags, &attr,
					&mi, &le);
				if (err)
					goto out;

				/* Layout of records maybe changed. */
				attr_b = NULL;
			}

			/* Free all allocated memory. */
			run_truncate(run, 0);
			done += eat;
		} else {
			u16 le_sz;

			/* Delete this attribute segment. */
			mi_remove_attr(NULL, mi, attr);
			if (!le)
				break;

			le_sz = le16_to_cpu(le->size);
			if (!al_remove_le(ni, le)) {
				err = -EINVAL;
				goto out;
			}

			done += evcn1 - svcn;
			if (evcn1 >= alen)
				break;

			if (!svcn) {
				/* Load next record that contains this attribute. */
				if (ni_load_mi(ni, le, &mi)) {
					err = -EINVAL;
					goto out;
				}

				/* Look for required attribute. */
				attr = mi_find_attr(ni, mi, NULL, ATTR_DATA,
						    NULL, 0, &le->id);
				if (!attr) {
					err = -EINVAL;
					goto out;
				}
				continue;
			}
			/* al_remove_le moved entries down: step back one. */
			le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
		}

next_attr:
		if (evcn1 >= alen)
			break;

		attr = ni_enum_attr_ex(ni, attr, &le, &mi);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}
	}

	if (!attr_b) {
		/* Re-find base attribute: layout changed above. */
		le_b = NULL;
		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
				      &mi_b);
		if (!attr_b) {
			err = -ENOENT;
			goto out;
		}
	}

	data_size -= bytes;
	valid_size = ni->i_valid;
	if (vbo + bytes <= valid_size)
		valid_size -= bytes;
	else if (vbo < valid_size)
		valid_size = vbo;

	attr_b->nres.alloc_size = cpu_to_le64(alloc_size - bytes);
	attr_b->nres.data_size = cpu_to_le64(data_size);
	attr_b->nres.valid_size = cpu_to_le64(min(valid_size, data_size));
	total_size -= (u64)dealloc << sbi->cluster_bits;
	if (is_attr_ext(attr_b))
		attr_b->nres.total_size = cpu_to_le64(total_size);
	mi_b->dirty = true;

	/* Update inode size. */
	ni->i_valid = valid_size;
	i_size_write(&ni->vfs_inode, data_size);
	inode_set_bytes(&ni->vfs_inode, total_size);
	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
	mark_inode_dirty(&ni->vfs_inode);

out:
	up_write(&ni->file.run_lock);
	if (err)
		_ntfs_bad_inode(&ni->vfs_inode);

	return err;
}
2252
2253 /*
2254 * attr_punch_hole
2255 *
2256 * Not for normal files.
2257 */
/*
 * attr_punch_hole
 *
 * Not for normal files.
 *
 * Replaces clusters in [vbo, vbo + bytes) with sparse runs and frees the
 * underlying space. Resident data is simply zeroed. Unaligned ranges
 * return E_NTFS_NOTALIGNED with *frame_size set so the caller can zero
 * the partial frames itself.
 */
int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
{
	int err = 0;
	struct runs_tree *run = &ni->file.run;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST svcn, evcn1, vcn, len, end, alen, hole, next_svcn;
	u64 total_size, alloc_size;
	u32 mask;
	__le16 a_flags;
	struct runs_tree run2; /* Clone used to undo a failed punch. */

	if (!bytes)
		return 0;

	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
	if (!attr_b)
		return -ENOENT;

	if (!attr_b->non_res) {
		/* Resident data: just zero the in-record bytes. */
		u32 data_size = le32_to_cpu(attr_b->res.data_size);
		u32 from, to;

		if (vbo > data_size)
			return 0;

		from = vbo;
		to = min_t(u64, vbo + bytes, data_size);
		memset(Add2Ptr(resident_data(attr_b), from), 0, to - from);
		return 0;
	}

	if (!is_attr_ext(attr_b))
		return -EOPNOTSUPP;

	alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
	total_size = le64_to_cpu(attr_b->nres.total_size);

	if (vbo >= alloc_size) {
		/* NOTE: It is allowed. */
		return 0;
	}

	/* Alignment unit is the compression frame. */
	mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;

	/* Clamp the range to the allocated size. */
	bytes += vbo;
	if (bytes > alloc_size)
		bytes = alloc_size;
	bytes -= vbo;

	if ((vbo | bytes) & mask) {
		/* We have to zero a range(s). */
		if (!frame_size) {
			/* Caller insists range is aligned. */
			return -EINVAL;
		}
		*frame_size = mask + 1;
		return E_NTFS_NOTALIGNED;
	}

	down_write(&ni->file.run_lock);
	run_init(&run2);
	run_truncate(run, 0);

	/*
	 * Enumerate all attribute segments and punch hole where necessary.
	 */
	alen = alloc_size >> sbi->cluster_bits;
	vcn = vbo >> sbi->cluster_bits;
	len = bytes >> sbi->cluster_bits;
	end = vcn + len;
	hole = 0; /* Total clusters turned into hole. */

	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
	a_flags = attr_b->flags;

	/* Find the segment containing the first vcn of the range. */
	if (svcn <= vcn && vcn < evcn1) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else if (!le_b) {
		err = -EINVAL;
		goto bad_inode;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto bad_inode;
		}

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	while (svcn < end) {
		CLST vcn1, zero, hole2 = hole;

		err = attr_load_runs(attr, ni, run, &svcn);
		if (err)
			goto done;
		vcn1 = max(vcn, svcn);
		zero = min(end, evcn1) - vcn1;

		/*
		 * Check range [vcn1 + zero).
		 * Calculate how many clusters there are.
		 * Don't do any destructive actions.
		 */
		err = run_deallocate_ex(NULL, run, vcn1, zero, &hole2, false,
					NULL);
		if (err)
			goto done;

		/* Check if required range is already hole. */
		if (hole2 == hole)
			goto next_attr;

		/* Make a clone of run to undo. */
		err = run_clone(run, &run2);
		if (err)
			goto done;

		/* Make a hole range (sparse) [vcn1 + zero). */
		if (!run_add_entry(run, vcn1, SPARSE_LCN, zero, false)) {
			err = -ENOMEM;
			goto done;
		}

		/* Update run in attribute segment. */
		err = mi_pack_runs(mi, attr, run, evcn1 - svcn);
		if (err)
			goto done;
		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
		if (next_svcn < evcn1) {
			/* Insert new attribute segment. */
			err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
						    next_svcn,
						    evcn1 - next_svcn, a_flags,
						    &attr, &mi, &le);
			if (err)
				goto undo_punch;

			/* Layout of records maybe changed. */
			attr_b = NULL;
		}

		/* Real deallocate. Should not fail. */
		run_deallocate_ex(sbi, &run2, vcn1, zero, &hole, true,
				  &ni->file.run_da);

next_attr:
		/* Free all allocated memory. */
		run_truncate(run, 0);

		if (evcn1 >= alen)
			break;

		/* Get next attribute segment. */
		attr = ni_enum_attr_ex(ni, attr, &le, &mi);
		if (!attr) {
			err = -EINVAL;
			goto bad_inode;
		}

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

done:
	if (!hole)
		goto out;

	if (!attr_b) {
		/* Re-find base attribute: layout changed above. */
		attr_b = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL,
				      &mi_b);
		if (!attr_b) {
			err = -EINVAL;
			goto bad_inode;
		}
	}

	total_size -= (u64)hole << sbi->cluster_bits;
	attr_b->nres.total_size = cpu_to_le64(total_size);
	mi_b->dirty = true;

	/* Update inode size. */
	inode_set_bytes(&ni->vfs_inode, total_size);
	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
	mark_inode_dirty(&ni->vfs_inode);

out:
	run_close(&run2);
	up_write(&ni->file.run_lock);
	return err;

bad_inode:
	_ntfs_bad_inode(&ni->vfs_inode);
	goto out;

undo_punch:
	/*
	 * Restore packed runs.
	 * 'mi_pack_runs' should not fail, cause we restore original.
	 */
	if (mi_pack_runs(mi, attr, &run2, evcn1 - svcn))
		goto bad_inode;

	goto done;
}
2473
2474 /*
2475 * attr_insert_range - Insert range (hole) in file.
2476 * Not for normal files.
2477 */
attr_insert_range(struct ntfs_inode * ni,u64 vbo,u64 bytes)2478 int attr_insert_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
2479 {
2480 int err = 0;
2481 struct runs_tree *run = &ni->file.run;
2482 struct ntfs_sb_info *sbi = ni->mi.sbi;
2483 struct ATTRIB *attr = NULL, *attr_b;
2484 struct ATTR_LIST_ENTRY *le, *le_b;
2485 struct mft_inode *mi, *mi_b;
2486 CLST vcn, svcn, evcn1, len, next_svcn;
2487 u64 data_size, alloc_size;
2488 u32 mask;
2489 __le16 a_flags;
2490
2491 if (!bytes)
2492 return 0;
2493
2494 le_b = NULL;
2495 attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
2496 if (!attr_b)
2497 return -ENOENT;
2498
2499 if (!is_attr_ext(attr_b)) {
2500 /* It was checked above. See fallocate. */
2501 return -EOPNOTSUPP;
2502 }
2503
2504 if (!attr_b->non_res) {
2505 data_size = le32_to_cpu(attr_b->res.data_size);
2506 alloc_size = data_size;
2507 mask = sbi->cluster_mask; /* cluster_size - 1 */
2508 } else {
2509 data_size = le64_to_cpu(attr_b->nres.data_size);
2510 alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
2511 mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
2512 }
2513
2514 if (vbo >= data_size) {
2515 /*
2516 * Insert range after the file size is not allowed.
2517 * If the offset is equal to or greater than the end of
2518 * file, an error is returned. For such operations (i.e., inserting
2519 * a hole at the end of file), ftruncate(2) should be used.
2520 */
2521 return -EINVAL;
2522 }
2523
2524 if ((vbo | bytes) & mask) {
2525 /* Allow to insert only frame aligned ranges. */
2526 return -EINVAL;
2527 }
2528
2529 /*
2530 * valid_size <= data_size <= alloc_size
2531 * Check alloc_size for maximum possible.
2532 */
2533 if (bytes > sbi->maxbytes_sparse - alloc_size)
2534 return -EFBIG;
2535
2536 vcn = vbo >> sbi->cluster_bits;
2537 len = bytes >> sbi->cluster_bits;
2538
2539 down_write(&ni->file.run_lock);
2540
2541 if (!attr_b->non_res) {
2542 err = attr_set_size(ni, ATTR_DATA, NULL, 0, run,
2543 data_size + bytes, NULL, false);
2544
2545 le_b = NULL;
2546 attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
2547 &mi_b);
2548 if (!attr_b) {
2549 err = -EINVAL;
2550 goto bad_inode;
2551 }
2552
2553 if (err)
2554 goto out;
2555
2556 if (!attr_b->non_res) {
2557 /* Still resident. */
2558 char *data = Add2Ptr(attr_b,
2559 le16_to_cpu(attr_b->res.data_off));
2560
2561 memmove(data + bytes, data, bytes);
2562 memset(data, 0, bytes);
2563 goto done;
2564 }
2565
2566 /* Resident file becomes nonresident. */
2567 data_size = le64_to_cpu(attr_b->nres.data_size);
2568 alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
2569 }
2570
2571 /*
2572 * Enumerate all attribute segments and shift start vcn.
2573 */
2574 a_flags = attr_b->flags;
2575 svcn = le64_to_cpu(attr_b->nres.svcn);
2576 evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
2577
2578 if (svcn <= vcn && vcn < evcn1) {
2579 attr = attr_b;
2580 le = le_b;
2581 mi = mi_b;
2582 } else if (!le_b) {
2583 err = -EINVAL;
2584 goto bad_inode;
2585 } else {
2586 le = le_b;
2587 attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
2588 &mi);
2589 if (!attr) {
2590 err = -EINVAL;
2591 goto bad_inode;
2592 }
2593
2594 svcn = le64_to_cpu(attr->nres.svcn);
2595 evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2596 }
2597
2598 run_truncate(run, 0); /* clear cached values. */
2599 err = attr_load_runs(attr, ni, run, NULL);
2600 if (err)
2601 goto out;
2602
2603 err = run_insert_range(run, vcn, len);
2604 if (err)
2605 goto out;
2606
2607 err = run_insert_range_da(&ni->file.run_da, vcn, len);
2608 if (err)
2609 goto out;
2610
2611 /* Try to pack in current record as much as possible. */
2612 err = mi_pack_runs(mi, attr, run, evcn1 + len - svcn);
2613 if (err)
2614 goto out;
2615
2616 next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
2617
2618 while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi)) &&
2619 attr->type == ATTR_DATA && !attr->name_len) {
2620 le64_add_cpu(&attr->nres.svcn, len);
2621 le64_add_cpu(&attr->nres.evcn, len);
2622 if (le) {
2623 le->vcn = attr->nres.svcn;
2624 ni->attr_list.dirty = true;
2625 }
2626 mi->dirty = true;
2627 }
2628
2629 if (next_svcn < evcn1 + len) {
2630 err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
2631 next_svcn, evcn1 + len - next_svcn,
2632 a_flags, NULL, NULL, NULL);
2633
2634 le_b = NULL;
2635 attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
2636 &mi_b);
2637 if (!attr_b) {
2638 err = -EINVAL;
2639 goto bad_inode;
2640 }
2641
2642 if (err) {
2643 /* ni_insert_nonresident failed. Try to undo. */
2644 goto undo_insert_range;
2645 }
2646 }
2647
2648 /*
2649 * Update primary attribute segment.
2650 */
2651 if (vbo <= ni->i_valid)
2652 ni->i_valid += bytes;
2653
2654 attr_b->nres.data_size = cpu_to_le64(data_size + bytes);
2655 attr_b->nres.alloc_size = cpu_to_le64(alloc_size + bytes);
2656
2657 /* ni->valid may be not equal valid_size (temporary). */
2658 if (ni->i_valid > data_size + bytes)
2659 attr_b->nres.valid_size = attr_b->nres.data_size;
2660 else
2661 attr_b->nres.valid_size = cpu_to_le64(ni->i_valid);
2662 mi_b->dirty = true;
2663
2664 done:
2665 i_size_write(&ni->vfs_inode, ni->vfs_inode.i_size + bytes);
2666 ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
2667 mark_inode_dirty(&ni->vfs_inode);
2668
2669 out:
2670 run_truncate(run, 0); /* clear cached values. */
2671
2672 up_write(&ni->file.run_lock);
2673
2674 return err;
2675
2676 bad_inode:
2677 _ntfs_bad_inode(&ni->vfs_inode);
2678 goto out;
2679
2680 undo_insert_range:
2681 svcn = le64_to_cpu(attr_b->nres.svcn);
2682 evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
2683
2684 if (svcn <= vcn && vcn < evcn1) {
2685 attr = attr_b;
2686 le = le_b;
2687 mi = mi_b;
2688 } else if (!le_b) {
2689 goto bad_inode;
2690 } else {
2691 le = le_b;
2692 attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
2693 &mi);
2694 if (!attr) {
2695 goto bad_inode;
2696 }
2697
2698 svcn = le64_to_cpu(attr->nres.svcn);
2699 evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2700 }
2701
2702 if (attr_load_runs(attr, ni, run, NULL))
2703 goto bad_inode;
2704
2705 if (!run_collapse_range(run, vcn, len, 0))
2706 goto bad_inode;
2707
2708 if (mi_pack_runs(mi, attr, run, evcn1 + len - svcn))
2709 goto bad_inode;
2710
2711 while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi)) &&
2712 attr->type == ATTR_DATA && !attr->name_len) {
2713 le64_sub_cpu(&attr->nres.svcn, len);
2714 le64_sub_cpu(&attr->nres.evcn, len);
2715 if (le) {
2716 le->vcn = attr->nres.svcn;
2717 ni->attr_list.dirty = true;
2718 }
2719 mi->dirty = true;
2720 }
2721
2722 goto out;
2723 }
2724
2725 /*
2726 * attr_force_nonresident
2727 *
2728 * Convert default data attribute into non resident form.
2729 */
attr_force_nonresident(struct ntfs_inode * ni)2730 int attr_force_nonresident(struct ntfs_inode *ni)
2731 {
2732 int err;
2733 struct ATTRIB *attr;
2734 struct ATTR_LIST_ENTRY *le = NULL;
2735 struct mft_inode *mi;
2736
2737 attr = ni_find_attr(ni, NULL, &le, ATTR_DATA, NULL, 0, NULL, &mi);
2738 if (!attr) {
2739 _ntfs_bad_inode(&ni->vfs_inode);
2740 return -ENOENT;
2741 }
2742
2743 if (attr->non_res) {
2744 /* Already non resident. */
2745 return 0;
2746 }
2747
2748 down_write(&ni->file.run_lock);
2749 err = attr_make_nonresident(ni, attr, le, mi,
2750 le32_to_cpu(attr->res.data_size),
2751 &ni->file.run, &attr, NULL);
2752 up_write(&ni->file.run_lock);
2753
2754 return err;
2755 }
2756