// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ufs/inode.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 *
 * from
 *
 * linux/fs/ext2/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 * linux/fs/minix/inode.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * Goal-directed block allocation by Stephen Tweedie (sct@dcs.ed.ac.uk), 1993
 * Big-endian to little-endian byte-swapping/bitmaps by
 * David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/uaccess.h>

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/iversion.h>

#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"

static int ufs_block_to_path(struct inode *inode, sector_t i_block, unsigned offsets[4])
{
	struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi;
	int ptrs = uspi->s_apb;
	int ptrs_bits = uspi->s_apbshift;
	const long direct_blocks = UFS_NDADDR,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;


	UFSD("ptrs=uspi->s_apb = %d,double_blocks=%ld \n",ptrs,double_blocks);
	if (i_block < direct_blocks) {
		offsets[n++] = i_block;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = UFS_IND_BLOCK;
		offsets[n++] = i_block;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = UFS_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = UFS_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
	} else {
		ufs_warning(inode->i_sb, "ufs_block_to_path", "block > big");
	}
	return n;
}

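/*
 * For illustration (an assumed example geometry, not a fixed layout):
 * with UFS_NDADDR = 12 direct slots and s_apb = 256 pointers per indirect
 * block (s_apbshift = 8), i_block = 300 lies past the 12 direct and 256
 * single-indirect blocks, so 300 - 12 - 256 = 32 is resolved within the
 * double indirect tree: offsets[] = { UFS_DIND_BLOCK, 32 >> 8, 32 & 255 }
 * = { UFS_DIND_BLOCK, 0, 32 }, and the function returns depth 3.
 */
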
typedef struct {
	void *p;
	union {
		__fs32 key32;
		__fs64 key64;
	};
	struct buffer_head *bh;
} Indirect;

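/*
 * grow_chain32()/grow_chain64() append one step to the Indirect chain:
 * they sample the on-disk pointer at @v under ufsi->meta_lock (a seqlock)
 * and re-check every pointer from @from to @to against its cached key.
 * A zero return means some pointer changed under us (e.g. due to a
 * concurrent truncate) and the caller must restart the lookup.
 */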
static inline int grow_chain32(struct ufs_inode_info *ufsi,
			       struct buffer_head *bh, __fs32 *v,
			       Indirect *from, Indirect *to)
{
	Indirect *p;
	unsigned seq;
	to->bh = bh;
	do {
		seq = read_seqbegin(&ufsi->meta_lock);
		to->key32 = *(__fs32 *)(to->p = v);
		for (p = from; p <= to && p->key32 == *(__fs32 *)p->p; p++)
			;
	} while (read_seqretry(&ufsi->meta_lock, seq));
	return (p > to);
}

static inline int grow_chain64(struct ufs_inode_info *ufsi,
			       struct buffer_head *bh, __fs64 *v,
			       Indirect *from, Indirect *to)
{
	Indirect *p;
	unsigned seq;
	to->bh = bh;
	do {
		seq = read_seqbegin(&ufsi->meta_lock);
		to->key64 = *(__fs64 *)(to->p = v);
		for (p = from; p <= to && p->key64 == *(__fs64 *)p->p; p++)
			;
	} while (read_seqretry(&ufsi->meta_lock, seq));
	return (p > to);
}

/*
 * Returns the location of the fragment from
 * the beginning of the filesystem.
 */

static u64 ufs_frag_map(struct inode *inode, unsigned offsets[4], int depth)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	u64 mask = (u64) uspi->s_apbmask >> uspi->s_fpbshift;
	int shift = uspi->s_apbshift - uspi->s_fpbshift;
	Indirect chain[4], *q = chain;
	unsigned *p;
	unsigned flags = UFS_SB(sb)->s_flags;
	u64 res = 0;

	UFSD(": uspi->s_fpbshift = %d ,uspi->s_apbmask = %x, mask=%llx\n",
	     uspi->s_fpbshift, uspi->s_apbmask,
	     (unsigned long long)mask);

	if (depth == 0)
		goto no_block;

again:
	p = offsets;

	if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
		goto ufs2;

	if (!grow_chain32(ufsi, NULL, &ufsi->i_u1.i_data[*p++], chain, q))
		goto changed;
	if (!q->key32)
		goto no_block;
	while (--depth) {
		__fs32 *ptr;
		struct buffer_head *bh;
		unsigned n = *p++;

		bh = sb_bread(sb, uspi->s_sbbase +
				  fs32_to_cpu(sb, q->key32) + (n >> shift));
		if (!bh)
			goto no_block;
		ptr = (__fs32 *)bh->b_data + (n & mask);
		if (!grow_chain32(ufsi, bh, ptr, chain, ++q))
			goto changed;
		if (!q->key32)
			goto no_block;
	}
	res = fs32_to_cpu(sb, q->key32);
	goto found;

ufs2:
	if (!grow_chain64(ufsi, NULL, &ufsi->i_u1.u2_i_data[*p++], chain, q))
		goto changed;
	if (!q->key64)
		goto no_block;

	while (--depth) {
		__fs64 *ptr;
		struct buffer_head *bh;
		unsigned n = *p++;

		bh = sb_bread(sb, uspi->s_sbbase +
				  fs64_to_cpu(sb, q->key64) + (n >> shift));
		if (!bh)
			goto no_block;
		ptr = (__fs64 *)bh->b_data + (n & mask);
		if (!grow_chain64(ufsi, bh, ptr, chain, ++q))
			goto changed;
		if (!q->key64)
			goto no_block;
	}
	res = fs64_to_cpu(sb, q->key64);
found:
	res += uspi->s_sbbase;
no_block:
	while (q > chain) {
		brelse(q->bh);
		q--;
	}
	return res;

changed:
	while (q > chain) {
		brelse(q->bh);
		q--;
	}
	goto again;
}

/*
 * Unpacking tails: we have a file with partial final block and
 * we had been asked to extend it.  If the fragment being written
 * is within the same block, we need to extend the tail just to cover
 * that fragment.  Otherwise the tail is extended to full block.
 *
 * Note that we might need to create a _new_ tail, but that will
 * be handled elsewhere; this is strictly for resizing old
 * ones.
 */
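/*
 * Worked example (assumed geometry, s_fpb = 8 fragments per block,
 * s_fpbmask = 7): with i_lastfrag = 13 the tail occupies 5 fragments of
 * its block.  A write to fragment 14 stays below (13 | 7) = 15, so the
 * tail only grows to new_size = (14 & 7) + 1 = 7 fragments; a write at
 * or past fragment 15 unpacks it to the full new_size = s_fpb = 8.
 */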
static bool
ufs_extend_tail(struct inode *inode, u64 writes_to,
		int *err, struct folio *locked_folio)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned lastfrag = ufsi->i_lastfrag;	/* it's a short file, so unsigned is enough */
	unsigned block = ufs_fragstoblks(lastfrag);
	unsigned new_size;
	void *p;
	u64 tmp;

	if (writes_to < (lastfrag | uspi->s_fpbmask))
		new_size = (writes_to & uspi->s_fpbmask) + 1;
	else
		new_size = uspi->s_fpb;

	p = ufs_get_direct_data_ptr(uspi, ufsi, block);
	tmp = ufs_new_fragments(inode, p, lastfrag, ufs_data_ptr_to_cpu(sb, p),
				new_size - (lastfrag & uspi->s_fpbmask), err,
				locked_folio);
	return tmp != 0;
}


/**
 * ufs_inode_getfrag() - allocate new fragment(s)
 * @inode: pointer to inode
 * @index: number of block pointer within the inode's array.
 * @new_fragment: number of the fragment to be allocated
 * @err: set to an error code if something goes wrong
 * @new: set to 1 if we allocate a new block
 * @locked_folio: for ufs_new_fragments()
 */
static u64 ufs_inode_getfrag(struct inode *inode, unsigned index,
			     sector_t new_fragment, int *err,
			     int *new, struct folio *locked_folio)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	u64 tmp, goal, lastfrag;
	unsigned nfrags = uspi->s_fpb;
	void *p;

	p = ufs_get_direct_data_ptr(uspi, ufsi, index);
	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (tmp)
		goto out;

	lastfrag = ufsi->i_lastfrag;

	/* will that be a new tail? */
	if (new_fragment < UFS_NDIR_FRAGMENT && new_fragment >= lastfrag)
		nfrags = (new_fragment & uspi->s_fpbmask) + 1;

	goal = 0;
	if (index) {
		goal = ufs_data_ptr_to_cpu(sb,
				ufs_get_direct_data_ptr(uspi, ufsi, index - 1));
		if (goal)
			goal += uspi->s_fpb;
	}
	tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment),
				goal, nfrags, err, locked_folio);

	if (!tmp) {
		*err = -ENOSPC;
		return 0;
	}

	if (new)
		*new = 1;
	inode_set_ctime_current(inode);
	if (IS_SYNC(inode))
		ufs_sync_inode(inode);
	mark_inode_dirty(inode);
out:
	return tmp + uspi->s_sbbase;
}


/**
 * ufs_inode_getblock() - allocate new block
 * @inode: pointer to inode
 * @ind_block: block number of the indirect block
 * @index: number of pointer within the indirect block
 * @new_fragment: number of the fragment to be allocated
 *	(the new block will hold this fragment plus uspi->s_fpb - 1 others)
 * @err: see ufs_inode_getfrag()
 * @new: see ufs_inode_getfrag()
 * @locked_folio: see ufs_inode_getfrag()
 */
static u64 ufs_inode_getblock(struct inode *inode, u64 ind_block,
			      unsigned index, sector_t new_fragment, int *err,
			      int *new, struct folio *locked_folio)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	int shift = uspi->s_apbshift - uspi->s_fpbshift;
	u64 tmp = 0, goal;
	struct buffer_head *bh;
	void *p;

	if (!ind_block)
		return 0;

	bh = sb_bread(sb, ind_block + (index >> shift));
	if (unlikely(!bh)) {
		*err = -EIO;
		return 0;
	}

	index &= uspi->s_apbmask >> uspi->s_fpbshift;
	if (uspi->fs_magic == UFS2_MAGIC)
		p = (__fs64 *)bh->b_data + index;
	else
		p = (__fs32 *)bh->b_data + index;

	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (tmp)
		goto out;

	if (index && (uspi->fs_magic == UFS2_MAGIC ?
		      (tmp = fs64_to_cpu(sb, ((__fs64 *)bh->b_data)[index-1])) :
		      (tmp = fs32_to_cpu(sb, ((__fs32 *)bh->b_data)[index-1]))))
		goal = tmp + uspi->s_fpb;
	else
		goal = bh->b_blocknr + uspi->s_fpb;
	tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment), goal,
				uspi->s_fpb, err, locked_folio);
	if (!tmp)
		goto out;

	if (new)
		*new = 1;

	mark_buffer_dirty(bh);
	if (IS_SYNC(inode))
		sync_dirty_buffer(bh);
	inode_set_ctime_current(inode);
	mark_inode_dirty(inode);
out:
	brelse(bh);
	UFSD("EXIT\n");
	if (tmp)
		tmp += uspi->s_sbbase;
	return tmp;
}


/**
 * ufs_getfrag_block() - `get_block_t' function, interface between UFS and
 * read_folio, writepages and so on
 */

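/*
 * A fragment-sized request is split in two: the upper bits select the
 * block (fragment >> s_fpbshift, walked via ufs_block_to_path()), while
 * the low bits (fragment & s_fpbmask) are the fragment's offset inside
 * that block and are simply added to the mapped physical address.
 */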
static int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	int err = 0, new = 0;
	unsigned offsets[4];
	int depth = ufs_block_to_path(inode, fragment >> uspi->s_fpbshift, offsets);
	u64 phys64 = 0;
	unsigned frag = fragment & uspi->s_fpbmask;

	phys64 = ufs_frag_map(inode, offsets, depth);
	if (!create)
		goto done;

	if (phys64) {
		if (fragment >= UFS_NDIR_FRAGMENT)
			goto done;
		read_seqlock_excl(&UFS_I(inode)->meta_lock);
		if (fragment < UFS_I(inode)->i_lastfrag) {
			read_sequnlock_excl(&UFS_I(inode)->meta_lock);
			goto done;
		}
		read_sequnlock_excl(&UFS_I(inode)->meta_lock);
	}
	/* This code entered only while writing ....? */

	mutex_lock(&UFS_I(inode)->truncate_mutex);

	UFSD("ENTER, ino %lu, fragment %llu\n", inode->i_ino, (unsigned long long)fragment);
	if (unlikely(!depth)) {
		ufs_warning(sb, "ufs_get_block", "block > big");
		err = -EIO;
		goto out;
	}

	if (UFS_I(inode)->i_lastfrag < UFS_NDIR_FRAGMENT) {
		unsigned lastfrag = UFS_I(inode)->i_lastfrag;
		unsigned tailfrags = lastfrag & uspi->s_fpbmask;
		if (tailfrags && fragment >= lastfrag) {
			if (!ufs_extend_tail(inode, fragment,
					     &err, bh_result->b_folio))
				goto out;
		}
	}

	if (depth == 1) {
		phys64 = ufs_inode_getfrag(inode, offsets[0], fragment,
					   &err, &new, bh_result->b_folio);
	} else {
		int i;
		phys64 = ufs_inode_getfrag(inode, offsets[0], fragment,
					   &err, NULL, NULL);
		for (i = 1; i < depth - 1; i++)
			phys64 = ufs_inode_getblock(inode, phys64, offsets[i],
						    fragment, &err, NULL, NULL);
		phys64 = ufs_inode_getblock(inode, phys64, offsets[depth - 1],
					    fragment, &err, &new, bh_result->b_folio);
	}
out:
	if (phys64) {
		phys64 += frag;
		map_bh(bh_result, sb, phys64);
		if (new)
			set_buffer_new(bh_result);
	}
	mutex_unlock(&UFS_I(inode)->truncate_mutex);
	return err;

done:
	if (phys64)
		map_bh(bh_result, sb, phys64 + frag);
	return 0;
}


static int ufs_writepages(struct address_space *mapping,
			  struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, ufs_getfrag_block);
}

static int ufs_read_folio(struct file *file, struct folio *folio)
{
	return block_read_full_folio(folio, ufs_getfrag_block);
}

int ufs_prepare_chunk(struct folio *folio, loff_t pos, unsigned len)
{
	return __block_write_begin(folio, pos, len, ufs_getfrag_block);
}

static void ufs_truncate_blocks(struct inode *);

static void ufs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		ufs_truncate_blocks(inode);
	}
}

static int ufs_write_begin(const struct kiocb *iocb,
			   struct address_space *mapping,
			   loff_t pos, unsigned len,
			   struct folio **foliop, void **fsdata)
{
	int ret;

	ret = block_write_begin(mapping, pos, len, foliop, ufs_getfrag_block);
	if (unlikely(ret))
		ufs_write_failed(mapping, pos + len);

	return ret;
}

static int ufs_write_end(const struct kiocb *iocb,
			 struct address_space *mapping,
			 loff_t pos, unsigned len, unsigned copied,
			 struct folio *folio, void *fsdata)
{
	int ret;

	ret = generic_write_end(iocb, mapping, pos, len, copied, folio, fsdata);
	if (ret < len)
		ufs_write_failed(mapping, pos + len);
	return ret;
}

static sector_t ufs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, ufs_getfrag_block);
}

const struct address_space_operations ufs_aops = {
	.dirty_folio = block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.read_folio = ufs_read_folio,
	.writepages = ufs_writepages,
	.write_begin = ufs_write_begin,
	.write_end = ufs_write_end,
	.migrate_folio = buffer_migrate_folio,
	.bmap = ufs_bmap
};


static void ufs_set_inode_ops(struct inode *inode)
{
	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &ufs_file_inode_operations;
		inode->i_fop = &ufs_file_operations;
		inode->i_mapping->a_ops = &ufs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &ufs_dir_inode_operations;
		inode->i_fop = &ufs_dir_operations;
		inode->i_mapping->a_ops = &ufs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		if (!inode->i_blocks) {
			inode->i_link = (char *)UFS_I(inode)->i_u1.i_symlink;
			inode->i_op = &simple_symlink_inode_operations;
		} else {
			inode->i_mapping->a_ops = &ufs_aops;
			inode->i_op = &page_symlink_inode_operations;
			inode_nohighmem(inode);
		}
	} else
		init_special_inode(inode, inode->i_mode,
				   ufs_get_inode_dev(inode->i_sb, UFS_I(inode)));
}


static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	umode_t mode;

	/*
	 * Copy data to the in-core inode.
	 */
	inode->i_mode = mode = fs16_to_cpu(sb, ufs_inode->ui_mode);
	set_nlink(inode, fs16_to_cpu(sb, ufs_inode->ui_nlink));
	if (inode->i_nlink == 0)
		return -ESTALE;

	/*
	 * Linux now has 32-bit uid and gid, so we can support EFT.
	 */
	i_uid_write(inode, ufs_get_inode_uid(sb, ufs_inode));
	i_gid_write(inode, ufs_get_inode_gid(sb, ufs_inode));

	inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size);
	inode_set_atime(inode,
			(signed)fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec),
			0);
	inode_set_ctime(inode,
			(signed)fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec),
			0);
	inode_set_mtime(inode,
			(signed)fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec),
			0);
	inode->i_blocks = fs32_to_cpu(sb, ufs_inode->ui_blocks);
	inode->i_generation = fs32_to_cpu(sb, ufs_inode->ui_gen);
	ufsi->i_flags = fs32_to_cpu(sb, ufs_inode->ui_flags);
	ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
	ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);


	if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
		memcpy(ufsi->i_u1.i_data, &ufs_inode->ui_u2.ui_addr,
		       sizeof(ufs_inode->ui_u2.ui_addr));
	} else {
		memcpy(ufsi->i_u1.i_symlink, ufs_inode->ui_u2.ui_symlink,
		       sizeof(ufs_inode->ui_u2.ui_symlink) - 1);
		ufsi->i_u1.i_symlink[sizeof(ufs_inode->ui_u2.ui_symlink) - 1] = 0;
	}
	return 0;
}


static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	umode_t mode;

	UFSD("Reading ufs2 inode, ino %lu\n", inode->i_ino);
	/*
	 * Copy data to the in-core inode.
	 */
	inode->i_mode = mode = fs16_to_cpu(sb, ufs2_inode->ui_mode);
	set_nlink(inode, fs16_to_cpu(sb, ufs2_inode->ui_nlink));
	if (inode->i_nlink == 0)
		return -ESTALE;

	/*
	 * Linux now has 32-bit uid and gid, so we can support EFT.
	 */
	i_uid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_uid));
	i_gid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_gid));

	inode->i_size = fs64_to_cpu(sb, ufs2_inode->ui_size);
	inode_set_atime(inode, fs64_to_cpu(sb, ufs2_inode->ui_atime),
			fs32_to_cpu(sb, ufs2_inode->ui_atimensec));
	inode_set_ctime(inode, fs64_to_cpu(sb, ufs2_inode->ui_ctime),
			fs32_to_cpu(sb, ufs2_inode->ui_ctimensec));
	inode_set_mtime(inode, fs64_to_cpu(sb, ufs2_inode->ui_mtime),
			fs32_to_cpu(sb, ufs2_inode->ui_mtimensec));
	inode->i_blocks = fs64_to_cpu(sb, ufs2_inode->ui_blocks);
	inode->i_generation = fs32_to_cpu(sb, ufs2_inode->ui_gen);
	ufsi->i_flags = fs32_to_cpu(sb, ufs2_inode->ui_flags);
	/*
	ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
	ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);
	*/

	if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
		memcpy(ufsi->i_u1.u2_i_data, &ufs2_inode->ui_u2.ui_addr,
		       sizeof(ufs2_inode->ui_u2.ui_addr));
	} else {
		memcpy(ufsi->i_u1.i_symlink, ufs2_inode->ui_u2.ui_symlink,
		       sizeof(ufs2_inode->ui_u2.ui_symlink) - 1);
		ufsi->i_u1.i_symlink[sizeof(ufs2_inode->ui_u2.ui_symlink) - 1] = 0;
	}
	return 0;
}


struct inode *ufs_iget(struct super_block *sb, unsigned long ino)
{
	struct ufs_inode_info *ufsi;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct buffer_head *bh;
	struct inode *inode;
	int err = -EIO;

	UFSD("ENTER, ino %lu\n", ino);

	if (ino < UFS_ROOTINO || ino > (uspi->s_ncg * uspi->s_ipg)) {
		ufs_warning(sb, "ufs_read_inode", "bad inode number (%lu)\n",
			    ino);
		return ERR_PTR(-EIO);
	}

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	ufsi = UFS_I(inode);

	bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino));
	if (!bh) {
		ufs_warning(sb, "ufs_read_inode", "unable to read inode %lu\n",
			    inode->i_ino);
		goto bad_inode;
	}
	if ((UFS_SB(sb)->s_flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
		struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;

		err = ufs2_read_inode(inode,
				      ufs2_inode + ufs_inotofsbo(inode->i_ino));
	} else {
		struct ufs_inode *ufs_inode = (struct ufs_inode *)bh->b_data;

		err = ufs1_read_inode(inode,
				      ufs_inode + ufs_inotofsbo(inode->i_ino));
	}
	brelse(bh);
	if (err)
		goto bad_inode;

	inode_inc_iversion(inode);
	ufsi->i_lastfrag =
		(inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift;
	ufsi->i_dir_start_lookup = 0;
	ufsi->i_osync = 0;

	ufs_set_inode_ops(inode);

	UFSD("EXIT\n");
	unlock_new_inode(inode);
	return inode;

bad_inode:
	iget_failed(inode);
	return ERR_PTR(err);
}


static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_inode_info *ufsi = UFS_I(inode);

	ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
	ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);

	ufs_set_inode_uid(sb, ufs_inode, i_uid_read(inode));
	ufs_set_inode_gid(sb, ufs_inode, i_gid_read(inode));

	ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
	ufs_inode->ui_atime.tv_sec = cpu_to_fs32(sb,
						 inode_get_atime_sec(inode));
	ufs_inode->ui_atime.tv_usec = 0;
	ufs_inode->ui_ctime.tv_sec = cpu_to_fs32(sb,
						 inode_get_ctime_sec(inode));
	ufs_inode->ui_ctime.tv_usec = 0;
	ufs_inode->ui_mtime.tv_sec = cpu_to_fs32(sb,
						 inode_get_mtime_sec(inode));
	ufs_inode->ui_mtime.tv_usec = 0;
	ufs_inode->ui_blocks = cpu_to_fs32(sb, inode->i_blocks);
	ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
	ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);

	if ((UFS_SB(sb)->s_flags & UFS_UID_MASK) == UFS_UID_EFT) {
		ufs_inode->ui_u3.ui_sun.ui_shadow = cpu_to_fs32(sb, ufsi->i_shadow);
		ufs_inode->ui_u3.ui_sun.ui_oeftflag = cpu_to_fs32(sb, ufsi->i_oeftflag);
	}

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		/* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
		ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.i_data[0];
	} else if (inode->i_blocks) {
		memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.i_data,
		       sizeof(ufs_inode->ui_u2.ui_addr));
	} else {
		memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
		       sizeof(ufs_inode->ui_u2.ui_symlink));
	}

	if (!inode->i_nlink)
		memset(ufs_inode, 0, sizeof(struct ufs_inode));
}


static void ufs2_update_inode(struct inode *inode, struct ufs2_inode *ufs_inode)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_inode_info *ufsi = UFS_I(inode);

	UFSD("ENTER\n");
	ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
	ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);

	ufs_inode->ui_uid = cpu_to_fs32(sb, i_uid_read(inode));
	ufs_inode->ui_gid = cpu_to_fs32(sb, i_gid_read(inode));

	ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
	ufs_inode->ui_atime = cpu_to_fs64(sb, inode_get_atime_sec(inode));
	ufs_inode->ui_atimensec = cpu_to_fs32(sb,
					      inode_get_atime_nsec(inode));
	ufs_inode->ui_ctime = cpu_to_fs64(sb, inode_get_ctime_sec(inode));
	ufs_inode->ui_ctimensec = cpu_to_fs32(sb,
					      inode_get_ctime_nsec(inode));
	ufs_inode->ui_mtime = cpu_to_fs64(sb, inode_get_mtime_sec(inode));
	ufs_inode->ui_mtimensec = cpu_to_fs32(sb,
					      inode_get_mtime_nsec(inode));

	ufs_inode->ui_blocks = cpu_to_fs64(sb, inode->i_blocks);
	ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
	ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		/* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
		ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.u2_i_data[0];
	} else if (inode->i_blocks) {
		memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.u2_i_data,
		       sizeof(ufs_inode->ui_u2.ui_addr));
	} else {
		memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
		       sizeof(ufs_inode->ui_u2.ui_symlink));
	}

	if (!inode->i_nlink)
		memset(ufs_inode, 0, sizeof(struct ufs2_inode));
	UFSD("EXIT\n");
}


static int ufs_update_inode(struct inode *inode, int do_sync)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct buffer_head *bh;

	UFSD("ENTER, ino %lu\n", inode->i_ino);

	if (inode->i_ino < UFS_ROOTINO ||
	    inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) {
		ufs_warning(sb, "ufs_read_inode", "bad inode number (%lu)\n",
			    inode->i_ino);
		return -1;
	}

	bh = sb_bread(sb, ufs_inotofsba(inode->i_ino));
	if (!bh) {
		ufs_warning(sb, "ufs_read_inode", "unable to read inode %lu\n",
			    inode->i_ino);
		return -1;
	}
	if (uspi->fs_magic == UFS2_MAGIC) {
		struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;

		ufs2_update_inode(inode,
				  ufs2_inode + ufs_inotofsbo(inode->i_ino));
	} else {
		struct ufs_inode *ufs_inode = (struct ufs_inode *)bh->b_data;

		ufs1_update_inode(inode, ufs_inode + ufs_inotofsbo(inode->i_ino));
	}

	mark_buffer_dirty(bh);
	if (do_sync)
		sync_dirty_buffer(bh);
	brelse(bh);

	UFSD("EXIT\n");
	return 0;
}


int ufs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	return ufs_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
}

int ufs_sync_inode(struct inode *inode)
{
	return ufs_update_inode(inode, 1);
}

void ufs_evict_inode(struct inode *inode)
{
	int want_delete = 0;

	if (!inode->i_nlink && !is_bad_inode(inode))
		want_delete = 1;

	truncate_inode_pages_final(&inode->i_data);
	if (want_delete) {
		inode->i_size = 0;
		if (inode->i_blocks &&
		    (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
		     S_ISLNK(inode->i_mode)))
			ufs_truncate_blocks(inode);
		ufs_update_inode(inode, inode_needs_sync(inode));
	}

	invalidate_inode_buffers(inode);
	clear_inode(inode);

	if (want_delete)
		ufs_free_inode(inode);
}


struct to_free {
	struct inode *inode;
	u64 to;
	unsigned count;
};

static inline void free_data(struct to_free *ctx, u64 from, unsigned count)
{
	if (ctx->count && ctx->to != from) {
		ufs_free_blocks(ctx->inode, ctx->to - ctx->count, ctx->count);
		ctx->count = 0;
	}
	ctx->count += count;
	ctx->to = from + count;
}
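
/*
 * free_data() batches physically contiguous runs into one ufs_free_blocks()
 * call.  For illustration (assumed s_fpb = 8): freeing blocks at fragments
 * 64, 72 and then 200 issues nothing for the first two calls, then frees
 * the 16-fragment run starting at 64 once the discontiguity at 200 is
 * seen.  A final free_data(ctx, 0, 0) flushes whatever is still pending.
 */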

#define DIRECT_FRAGMENT ((inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift)
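/* i.e. the first fragment past the in-core EOF, in fragment units */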

/*
 * used only for truncation down to direct blocks.
 */
static void ufs_trunc_direct(struct inode *inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned int new_frags, old_frags;
	unsigned int old_slot, new_slot;
	unsigned int old_tail, new_tail;
	struct to_free ctx = {.inode = inode};

	UFSD("ENTER: ino %lu\n", inode->i_ino);

	new_frags = DIRECT_FRAGMENT;
	// new_frags = first fragment past the new EOF
	old_frags = min_t(u64, UFS_NDIR_FRAGMENT, ufsi->i_lastfrag);
	// old_frags = first fragment past the old EOF or covered by indirects

	if (new_frags >= old_frags)	// expanding - nothing to free
		goto done;

	old_tail = ufs_fragnum(old_frags);
	old_slot = ufs_fragstoblks(old_frags);
	new_tail = ufs_fragnum(new_frags);
	new_slot = ufs_fragstoblks(new_frags);

	if (old_slot == new_slot) {	// old_tail > 0
		void *p = ufs_get_direct_data_ptr(uspi, ufsi, old_slot);
		u64 tmp = ufs_data_ptr_to_cpu(sb, p);
		if (!tmp)
			ufs_panic(sb, __func__, "internal error");
		if (!new_tail) {
			write_seqlock(&ufsi->meta_lock);
			ufs_data_ptr_clear(uspi, p);
			write_sequnlock(&ufsi->meta_lock);
		}
		ufs_free_fragments(inode, tmp + new_tail, old_tail - new_tail);
	} else {
		unsigned int slot = new_slot;

		if (new_tail) {
			void *p = ufs_get_direct_data_ptr(uspi, ufsi, slot++);
			u64 tmp = ufs_data_ptr_to_cpu(sb, p);
			if (!tmp)
				ufs_panic(sb, __func__, "internal error");

			ufs_free_fragments(inode, tmp + new_tail,
					   uspi->s_fpb - new_tail);
		}
		while (slot < old_slot) {
			void *p = ufs_get_direct_data_ptr(uspi, ufsi, slot++);
			u64 tmp = ufs_data_ptr_to_cpu(sb, p);
			if (!tmp)
				continue;
			write_seqlock(&ufsi->meta_lock);
			ufs_data_ptr_clear(uspi, p);
			write_sequnlock(&ufsi->meta_lock);

			free_data(&ctx, tmp, uspi->s_fpb);
		}

		free_data(&ctx, 0, 0);

		if (old_tail) {
			void *p = ufs_get_direct_data_ptr(uspi, ufsi, slot);
			u64 tmp = ufs_data_ptr_to_cpu(sb, p);
			if (!tmp)
				ufs_panic(sb, __func__, "internal error");
			write_seqlock(&ufsi->meta_lock);
			ufs_data_ptr_clear(uspi, p);
			write_sequnlock(&ufsi->meta_lock);

			ufs_free_fragments(inode, tmp, old_tail);
		}
	}
done:
	UFSD("EXIT: ino %lu\n", inode->i_ino);
}


static void free_full_branch(struct inode *inode, u64 ind_block, int depth)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct ufs_buffer_head *ubh = ubh_bread(sb, ind_block, uspi->s_bsize);
	unsigned i;

	if (!ubh)
		return;

	if (--depth) {
		for (i = 0; i < uspi->s_apb; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block)
				free_full_branch(inode, block, depth);
		}
	} else {
		struct to_free ctx = {.inode = inode};

		for (i = 0; i < uspi->s_apb; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block)
				free_data(&ctx, block, uspi->s_fpb);
		}
		free_data(&ctx, 0, 0);
	}

	ubh_bforget(ubh);
	ufs_free_blocks(inode, ind_block, uspi->s_fpb);
}


static void free_branch_tail(struct inode *inode, unsigned from, struct ufs_buffer_head *ubh, int depth)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned i;

	if (--depth) {
		for (i = from; i < uspi->s_apb; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block) {
				write_seqlock(&UFS_I(inode)->meta_lock);
				ufs_data_ptr_clear(uspi, p);
				write_sequnlock(&UFS_I(inode)->meta_lock);
				ubh_mark_buffer_dirty(ubh);
				free_full_branch(inode, block, depth);
			}
		}
	} else {
		struct to_free ctx = {.inode = inode};

		for (i = from; i < uspi->s_apb; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block) {
				write_seqlock(&UFS_I(inode)->meta_lock);
				ufs_data_ptr_clear(uspi, p);
				write_sequnlock(&UFS_I(inode)->meta_lock);
				ubh_mark_buffer_dirty(ubh);
				free_data(&ctx, block, uspi->s_fpb);
			}
		}
		free_data(&ctx, 0, 0);
	}
	if (IS_SYNC(inode) && ubh_buffer_dirty(ubh))
		ubh_sync_block(ubh);
	ubh_brelse(ubh);
}


static int ufs_alloc_lastblock(struct inode *inode, loff_t size)
{
	int err = 0;
	struct super_block *sb = inode->i_sb;
	struct address_space *mapping = inode->i_mapping;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned i, end;
	sector_t lastfrag;
	struct folio *folio;
	struct buffer_head *bh;
	u64 phys64;

	lastfrag = (size + uspi->s_fsize - 1) >> uspi->s_fshift;

	if (!lastfrag)
		goto out;

	lastfrag--;

	folio = ufs_get_locked_folio(mapping, lastfrag >>
				     (PAGE_SHIFT - inode->i_blkbits));
	if (IS_ERR(folio)) {
		err = -EIO;
		goto out;
	}

	end = lastfrag & ((1 << (PAGE_SHIFT - inode->i_blkbits)) - 1);
	bh = folio_buffers(folio);
	for (i = 0; i < end; ++i)
		bh = bh->b_this_page;

	err = ufs_getfrag_block(inode, lastfrag, bh, 1);

	if (unlikely(err))
		goto out_unlock;

	if (buffer_new(bh)) {
		clear_buffer_new(bh);
		clean_bdev_bh_alias(bh);
		/*
		 * we do not zeroize the fragment: if it is mapped
		 * to a hole, it already contains zeroes
		 */
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		folio_mark_dirty(folio);
	}

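	/*
	 * Past the direct area the allocation above covered a whole block
	 * (uspi->s_fpb fragments), so zero out the on-disk fragments that
	 * follow lastfrag: they are allocated but not yet part of the file.
	 */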
	if (lastfrag >= UFS_IND_FRAGMENT) {
		end = uspi->s_fpb - ufs_fragnum(lastfrag) - 1;
		phys64 = bh->b_blocknr + 1;
		for (i = 0; i < end; ++i) {
			bh = sb_getblk(sb, i + phys64);
			lock_buffer(bh);
			memset(bh->b_data, 0, sb->s_blocksize);
			set_buffer_uptodate(bh);
			mark_buffer_dirty(bh);
			unlock_buffer(bh);
			sync_dirty_buffer(bh);
			brelse(bh);
		}
	}
out_unlock:
	ufs_put_locked_folio(folio);
out:
	return err;
}

static void ufs_truncate_blocks(struct inode *inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned offsets[4];
	int depth;
	int depth2;
	unsigned i;
	struct ufs_buffer_head *ubh[3];
	void *p;
	u64 block;

	if (inode->i_size) {
		sector_t last = (inode->i_size - 1) >> uspi->s_bshift;
		depth = ufs_block_to_path(inode, last, offsets);
		if (!depth)
			return;
	} else {
		depth = 1;
	}

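	/*
	 * depth2 = deepest level whose offset is not the last slot of its
	 * indirect block; levels below it end exactly on their final
	 * pointer, so only levels 1..depth2 need partial emptying.
	 */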
	for (depth2 = depth - 1; depth2; depth2--)
		if (offsets[depth2] != uspi->s_apb - 1)
			break;

	mutex_lock(&ufsi->truncate_mutex);
	if (depth == 1) {
		ufs_trunc_direct(inode);
		offsets[0] = UFS_IND_BLOCK;
	} else {
		/* get the blocks that should be partially emptied */
		p = ufs_get_direct_data_ptr(uspi, ufsi, offsets[0]++);
		for (i = 0; i < depth2; i++) {
			block = ufs_data_ptr_to_cpu(sb, p);
			if (!block)
				break;
			ubh[i] = ubh_bread(sb, block, uspi->s_bsize);
			if (!ubh[i]) {
				write_seqlock(&ufsi->meta_lock);
				ufs_data_ptr_clear(uspi, p);
				write_sequnlock(&ufsi->meta_lock);
				break;
			}
			p = ubh_get_data_ptr(uspi, ubh[i], offsets[i + 1]++);
		}
		while (i--)
			free_branch_tail(inode, offsets[i + 1], ubh[i], depth - i - 1);
	}
	for (i = offsets[0]; i <= UFS_TIND_BLOCK; i++) {
		p = ufs_get_direct_data_ptr(uspi, ufsi, i);
		block = ufs_data_ptr_to_cpu(sb, p);
		if (block) {
			write_seqlock(&ufsi->meta_lock);
			ufs_data_ptr_clear(uspi, p);
			write_sequnlock(&ufsi->meta_lock);
			free_full_branch(inode, block, i - UFS_IND_BLOCK + 1);
		}
	}
	read_seqlock_excl(&ufsi->meta_lock);
	ufsi->i_lastfrag = DIRECT_FRAGMENT;
	read_sequnlock_excl(&ufsi->meta_lock);
	mark_inode_dirty(inode);
	mutex_unlock(&ufsi->truncate_mutex);
}


static int ufs_truncate(struct inode *inode, loff_t size)
{
	int err = 0;

	UFSD("ENTER: ino %lu, i_size: %llu, old_i_size: %llu\n",
	     inode->i_ino, (unsigned long long)size,
	     (unsigned long long)i_size_read(inode));

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	      S_ISLNK(inode->i_mode)))
		return -EINVAL;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return -EPERM;

	err = ufs_alloc_lastblock(inode, size);

	if (err)
		goto out;

	block_truncate_page(inode->i_mapping, size, ufs_getfrag_block);

	truncate_setsize(inode, size);

	ufs_truncate_blocks(inode);
	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	mark_inode_dirty(inode);
out:
	UFSD("EXIT: err %d\n", err);
	return err;
}


int ufs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
		struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	unsigned int ia_valid = attr->ia_valid;
	int error;

	error = setattr_prepare(&nop_mnt_idmap, dentry, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE && attr->ia_size != inode->i_size) {
		error = ufs_truncate(inode, attr->ia_size);
		if (error)
			return error;
	}

	setattr_copy(&nop_mnt_idmap, inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

const struct inode_operations ufs_file_inode_operations = {
	.setattr = ufs_setattr,
};