1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * linux/fs/ext4/ioctl.c
4 *
5 * Copyright (C) 1993, 1994, 1995
6 * Remy Card (card@masi.ibp.fr)
7 * Laboratoire MASI - Institut Blaise Pascal
8 * Universite Pierre et Marie Curie (Paris VI)
9 */
10
11 #include <linux/fs.h>
12 #include <linux/capability.h>
13 #include <linux/time.h>
14 #include <linux/compat.h>
15 #include <linux/mount.h>
16 #include <linux/file.h>
17 #include <linux/quotaops.h>
18 #include <linux/random.h>
19 #include <linux/uaccess.h>
20 #include <linux/delay.h>
21 #include <linux/iversion.h>
22 #include <linux/fileattr.h>
23 #include <linux/uuid.h>
24 #include "ext4_jbd2.h"
25 #include "ext4.h"
26 #include <linux/fsmap.h>
27 #include "fsmap.h"
28 #include <trace/events/ext4.h>
29 #include <linux/fserror.h>
30
/*
 * Callback invoked by the superblock-update helpers for the primary
 * superblock and each backup copy; implementations write the new value
 * carried in @arg into the in-memory superblock @es.
 */
typedef void ext4_update_sb_callback(struct ext4_sb_info *sbi,
				     struct ext4_super_block *es,
				     const void *arg);
34
35 /*
36 * Superblock modification callback function for changing file system
37 * label
38 */
ext4_sb_setlabel(struct ext4_sb_info * sbi,struct ext4_super_block * es,const void * arg)39 static void ext4_sb_setlabel(struct ext4_sb_info *sbi,
40 struct ext4_super_block *es, const void *arg)
41 {
42 /* Sanity check, this should never happen */
43 BUILD_BUG_ON(sizeof(es->s_volume_name) < EXT4_LABEL_MAX);
44
45 memcpy(es->s_volume_name, (char *)arg, EXT4_LABEL_MAX);
46 }
47
48 /*
49 * Superblock modification callback function for changing file system
50 * UUID.
51 */
ext4_sb_setuuid(struct ext4_sb_info * sbi,struct ext4_super_block * es,const void * arg)52 static void ext4_sb_setuuid(struct ext4_sb_info *sbi,
53 struct ext4_super_block *es, const void *arg)
54 {
55 memcpy(es->s_uuid, (__u8 *)arg, UUID_SIZE);
56 }
57
/*
 * Modify the primary superblock using callback 'func' with argument
 * 'arg'. The update is journalled through 'handle', the superblock
 * checksum is recomputed, and the buffer is synchronously written out.
 *
 * Returns 0 on success, negative error code otherwise.
 */
static
int ext4_update_primary_sb(struct super_block *sb, handle_t *handle,
			   ext4_update_sb_callback func,
			   const void *arg)
{
	int err = 0;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct buffer_head *bh = sbi->s_sbh;
	struct ext4_super_block *es = sbi->s_es;

	trace_ext4_update_sb(sb, bh->b_blocknr, 1);

	BUFFER_TRACE(bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sb,
					    bh,
					    EXT4_JTR_NONE);
	if (err)
		goto out_err;

	/* Modify under the buffer lock so readers see a consistent csum. */
	lock_buffer(bh);
	func(sbi, es, arg);
	ext4_superblock_csum_set(sb);
	unlock_buffer(bh);

	if (buffer_write_io_error(bh) || !buffer_uptodate(bh)) {
		/*
		 * A previous write to the superblock failed; clear the
		 * error so this update can be written out again.
		 */
		ext4_msg(sbi->s_sb, KERN_ERR, "previous I/O error to "
			 "superblock detected");
		clear_buffer_write_io_error(bh);
		set_buffer_uptodate(bh);
	}

	err = ext4_handle_dirty_metadata(handle, NULL, bh);
	if (err)
		goto out_err;
	/* Push the new superblock to disk before returning. */
	err = sync_dirty_buffer(bh);
out_err:
	ext4_std_error(sb, err);
	return err;
}
97
98 /*
99 * Update one backup superblock in the group 'grp' using the callback
100 * function 'func' and argument 'arg'. If the handle is NULL the
101 * modification is not journalled.
102 *
103 * Returns: 0 when no modification was done (no superblock in the group)
104 * 1 when the modification was successful
105 * <0 on error
106 */
ext4_update_backup_sb(struct super_block * sb,handle_t * handle,ext4_group_t grp,ext4_update_sb_callback func,const void * arg)107 static int ext4_update_backup_sb(struct super_block *sb,
108 handle_t *handle, ext4_group_t grp,
109 ext4_update_sb_callback func, const void *arg)
110 {
111 int err = 0;
112 ext4_fsblk_t sb_block;
113 struct buffer_head *bh;
114 unsigned long offset = 0;
115 struct ext4_super_block *es;
116
117 if (!ext4_bg_has_super(sb, grp))
118 return 0;
119
120 /*
121 * For the group 0 there is always 1k padding, so we have
122 * either adjust offset, or sb_block depending on blocksize
123 */
124 if (grp == 0) {
125 sb_block = 1 * EXT4_MIN_BLOCK_SIZE;
126 offset = do_div(sb_block, sb->s_blocksize);
127 } else {
128 sb_block = ext4_group_first_block_no(sb, grp);
129 offset = 0;
130 }
131
132 trace_ext4_update_sb(sb, sb_block, handle ? 1 : 0);
133
134 bh = ext4_sb_bread(sb, sb_block, 0);
135 if (IS_ERR(bh))
136 return PTR_ERR(bh);
137
138 if (handle) {
139 BUFFER_TRACE(bh, "get_write_access");
140 err = ext4_journal_get_write_access(handle, sb,
141 bh,
142 EXT4_JTR_NONE);
143 if (err)
144 goto out_bh;
145 }
146
147 es = (struct ext4_super_block *) (bh->b_data + offset);
148 lock_buffer(bh);
149 if (ext4_has_feature_metadata_csum(sb) &&
150 es->s_checksum != ext4_superblock_csum(es)) {
151 ext4_msg(sb, KERN_ERR, "Invalid checksum for backup "
152 "superblock %llu", sb_block);
153 unlock_buffer(bh);
154 goto out_bh;
155 }
156 func(EXT4_SB(sb), es, arg);
157 if (ext4_has_feature_metadata_csum(sb))
158 es->s_checksum = ext4_superblock_csum(es);
159 set_buffer_uptodate(bh);
160 unlock_buffer(bh);
161
162 if (handle) {
163 err = ext4_handle_dirty_metadata(handle, NULL, bh);
164 if (err)
165 goto out_bh;
166 } else {
167 BUFFER_TRACE(bh, "marking dirty");
168 mark_buffer_dirty(bh);
169 }
170 err = sync_dirty_buffer(bh);
171
172 out_bh:
173 brelse(bh);
174 ext4_std_error(sb, err);
175 return (err) ? err : 1;
176 }
177
178 /*
179 * Update primary and backup superblocks using the provided function
180 * func and argument arg.
181 *
182 * Only the primary superblock and at most two backup superblock
183 * modifications are journalled; the rest is modified without journal.
184 * This is safe because e2fsck will re-write them if there is a problem,
185 * and we're very unlikely to ever need more than two backups.
186 */
187 static
ext4_update_superblocks_fn(struct super_block * sb,ext4_update_sb_callback func,const void * arg)188 int ext4_update_superblocks_fn(struct super_block *sb,
189 ext4_update_sb_callback func,
190 const void *arg)
191 {
192 handle_t *handle;
193 ext4_group_t ngroups;
194 unsigned int three = 1;
195 unsigned int five = 5;
196 unsigned int seven = 7;
197 int err = 0, ret, i;
198 ext4_group_t grp, primary_grp;
199 struct ext4_sb_info *sbi = EXT4_SB(sb);
200
201 /*
202 * We can't update superblocks while the online resize is running
203 */
204 if (test_and_set_bit_lock(EXT4_FLAGS_RESIZING,
205 &sbi->s_ext4_flags)) {
206 ext4_msg(sb, KERN_ERR, "Can't modify superblock while"
207 "performing online resize");
208 return -EBUSY;
209 }
210
211 /*
212 * We're only going to update primary superblock and two
213 * backup superblocks in this transaction.
214 */
215 handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 3);
216 if (IS_ERR(handle)) {
217 err = PTR_ERR(handle);
218 goto out;
219 }
220
221 /* Update primary superblock */
222 err = ext4_update_primary_sb(sb, handle, func, arg);
223 if (err) {
224 ext4_msg(sb, KERN_ERR, "Failed to update primary "
225 "superblock");
226 goto out_journal;
227 }
228
229 primary_grp = ext4_get_group_number(sb, sbi->s_sbh->b_blocknr);
230 ngroups = ext4_get_groups_count(sb);
231
232 /*
233 * Update backup superblocks. We have to start from group 0
234 * because it might not be where the primary superblock is
235 * if the fs is mounted with -o sb=<backup_sb_block>
236 */
237 i = 0;
238 grp = 0;
239 while (grp < ngroups) {
240 /* Skip primary superblock */
241 if (grp == primary_grp)
242 goto next_grp;
243
244 ret = ext4_update_backup_sb(sb, handle, grp, func, arg);
245 if (ret < 0) {
246 /* Ignore bad checksum; try to update next sb */
247 if (ret == -EFSBADCRC)
248 goto next_grp;
249 err = ret;
250 goto out_journal;
251 }
252
253 i += ret;
254 if (handle && i > 1) {
255 /*
256 * We're only journalling primary superblock and
257 * two backup superblocks; the rest is not
258 * journalled.
259 */
260 err = ext4_journal_stop(handle);
261 if (err)
262 goto out;
263 handle = NULL;
264 }
265 next_grp:
266 grp = ext4_list_backups(sb, &three, &five, &seven);
267 }
268
269 out_journal:
270 if (handle) {
271 ret = ext4_journal_stop(handle);
272 if (ret && !err)
273 err = ret;
274 }
275 out:
276 clear_bit_unlock(EXT4_FLAGS_RESIZING, &sbi->s_ext4_flags);
277 smp_mb__after_atomic();
278 return err ? err : 0;
279 }
280
281 /*
282 * Swap memory between @a and @b for @len bytes.
283 *
284 * @a: pointer to first memory area
285 * @b: pointer to second memory area
286 * @len: number of bytes to swap
287 *
288 */
memswap(void * a,void * b,size_t len)289 static void memswap(void *a, void *b, size_t len)
290 {
291 unsigned char *ap, *bp;
292
293 ap = (unsigned char *)a;
294 bp = (unsigned char *)b;
295 while (len-- > 0) {
296 swap(*ap, *bp);
297 ap++;
298 bp++;
299 }
300 }
301
302 /*
303 * Swap i_data and associated attributes between @inode1 and @inode2.
304 * This function is used for the primary swap between inode1 and inode2
305 * and also to revert this primary swap in case of errors.
306 *
307 * Therefore you have to make sure, that calling this method twice
308 * will revert all changes.
309 *
310 * @inode1: pointer to first inode
311 * @inode2: pointer to second inode
312 */
swap_inode_data(struct inode * inode1,struct inode * inode2)313 static void swap_inode_data(struct inode *inode1, struct inode *inode2)
314 {
315 loff_t isize;
316 struct ext4_inode_info *ei1;
317 struct ext4_inode_info *ei2;
318 unsigned long tmp;
319 struct timespec64 ts1, ts2;
320
321 ei1 = EXT4_I(inode1);
322 ei2 = EXT4_I(inode2);
323
324 swap(inode1->i_version, inode2->i_version);
325
326 ts1 = inode_get_atime(inode1);
327 ts2 = inode_get_atime(inode2);
328 inode_set_atime_to_ts(inode1, ts2);
329 inode_set_atime_to_ts(inode2, ts1);
330
331 ts1 = inode_get_mtime(inode1);
332 ts2 = inode_get_mtime(inode2);
333 inode_set_mtime_to_ts(inode1, ts2);
334 inode_set_mtime_to_ts(inode2, ts1);
335
336 memswap(ei1->i_data, ei2->i_data, sizeof(ei1->i_data));
337 tmp = ei1->i_flags & EXT4_FL_SHOULD_SWAP;
338 ei1->i_flags = (ei2->i_flags & EXT4_FL_SHOULD_SWAP) |
339 (ei1->i_flags & ~EXT4_FL_SHOULD_SWAP);
340 ei2->i_flags = tmp | (ei2->i_flags & ~EXT4_FL_SHOULD_SWAP);
341 swap(ei1->i_disksize, ei2->i_disksize);
342 ext4_es_remove_extent(inode1, 0, EXT_MAX_BLOCKS);
343 ext4_es_remove_extent(inode2, 0, EXT_MAX_BLOCKS);
344
345 isize = i_size_read(inode1);
346 i_size_write(inode1, i_size_read(inode2));
347 i_size_write(inode2, isize);
348 }
349
/*
 * Recompute the inode's metadata checksum seed from its inode number
 * and (freshly changed) generation. No-op on filesystems without the
 * metadata_csum feature.
 */
void ext4_reset_inode_seed(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__le32 inum = cpu_to_le32(inode->i_ino);
	__le32 gen = cpu_to_le32(inode->i_generation);
	__u32 csum;

	if (!ext4_has_feature_metadata_csum(inode->i_sb))
		return;

	/* Seed = chksum(chksum(fs seed, inode number), generation). */
	csum = ext4_chksum(sbi->s_csum_seed, (__u8 *)&inum, sizeof(inum));
	ei->i_csum_seed = ext4_chksum(csum, (__u8 *)&gen, sizeof(gen));
}
364
365 /*
366 * Swap the information from the given @inode and the inode
367 * EXT4_BOOT_LOADER_INO. It will basically swap i_data and all other
368 * important fields of the inodes.
369 *
370 * @sb: the super block of the filesystem
371 * @idmap: idmap of the mount the inode was found from
372 * @inode: the inode to swap with EXT4_BOOT_LOADER_INO
373 *
374 */
swap_inode_boot_loader(struct super_block * sb,struct mnt_idmap * idmap,struct inode * inode)375 static long swap_inode_boot_loader(struct super_block *sb,
376 struct mnt_idmap *idmap,
377 struct inode *inode)
378 {
379 handle_t *handle;
380 int err;
381 struct inode *inode_bl;
382 struct ext4_inode_info *ei_bl;
383 qsize_t size, size_bl, diff;
384 blkcnt_t blocks;
385 unsigned short bytes;
386
387 inode_bl = ext4_iget(sb, EXT4_BOOT_LOADER_INO,
388 EXT4_IGET_SPECIAL | EXT4_IGET_BAD);
389 if (IS_ERR(inode_bl))
390 return PTR_ERR(inode_bl);
391 ei_bl = EXT4_I(inode_bl);
392
393 /* Protect orig inodes against a truncate and make sure,
394 * that only 1 swap_inode_boot_loader is running. */
395 lock_two_nondirectories(inode, inode_bl);
396
397 if (inode->i_nlink != 1 || !S_ISREG(inode->i_mode) ||
398 IS_SWAPFILE(inode) || IS_ENCRYPTED(inode) ||
399 (EXT4_I(inode)->i_flags & EXT4_JOURNAL_DATA_FL) ||
400 ext4_has_inline_data(inode)) {
401 err = -EINVAL;
402 goto journal_err_out;
403 }
404
405 if (IS_RDONLY(inode) || IS_APPEND(inode) || IS_IMMUTABLE(inode) ||
406 !inode_owner_or_capable(idmap, inode) ||
407 !capable(CAP_SYS_ADMIN)) {
408 err = -EPERM;
409 goto journal_err_out;
410 }
411
412 filemap_invalidate_lock(inode->i_mapping);
413 err = filemap_write_and_wait(inode->i_mapping);
414 if (err)
415 goto err_out;
416
417 err = filemap_write_and_wait(inode_bl->i_mapping);
418 if (err)
419 goto err_out;
420
421 /* Wait for all existing dio workers */
422 inode_dio_wait(inode);
423 inode_dio_wait(inode_bl);
424
425 truncate_inode_pages(&inode->i_data, 0);
426 truncate_inode_pages(&inode_bl->i_data, 0);
427
428 handle = ext4_journal_start(inode_bl, EXT4_HT_MOVE_EXTENTS, 2);
429 if (IS_ERR(handle)) {
430 err = -EINVAL;
431 goto err_out;
432 }
433 ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_SWAP_BOOT, handle);
434
435 /* Protect extent tree against block allocations via delalloc */
436 ext4_double_down_write_data_sem(inode, inode_bl);
437
438 if (is_bad_inode(inode_bl) || !S_ISREG(inode_bl->i_mode)) {
439 /* this inode has never been used as a BOOT_LOADER */
440 set_nlink(inode_bl, 1);
441 i_uid_write(inode_bl, 0);
442 i_gid_write(inode_bl, 0);
443 inode_bl->i_flags = 0;
444 ei_bl->i_flags = 0;
445 inode_set_iversion(inode_bl, 1);
446 i_size_write(inode_bl, 0);
447 EXT4_I(inode_bl)->i_disksize = inode_bl->i_size;
448 inode_bl->i_mode = S_IFREG;
449 if (ext4_has_feature_extents(sb)) {
450 ext4_set_inode_flag(inode_bl, EXT4_INODE_EXTENTS);
451 ext4_ext_tree_init(handle, inode_bl);
452 } else
453 memset(ei_bl->i_data, 0, sizeof(ei_bl->i_data));
454 }
455
456 err = dquot_initialize(inode);
457 if (err)
458 goto err_out1;
459
460 size = (qsize_t)(inode->i_blocks) * (1 << 9) + inode->i_bytes;
461 size_bl = (qsize_t)(inode_bl->i_blocks) * (1 << 9) + inode_bl->i_bytes;
462 diff = size - size_bl;
463 swap_inode_data(inode, inode_bl);
464
465 inode_set_ctime_current(inode);
466 inode_set_ctime_current(inode_bl);
467 inode_inc_iversion(inode);
468
469 inode->i_generation = get_random_u32();
470 inode_bl->i_generation = get_random_u32();
471 ext4_reset_inode_seed(inode);
472 ext4_reset_inode_seed(inode_bl);
473
474 ext4_discard_preallocations(inode);
475
476 err = ext4_mark_inode_dirty(handle, inode);
477 if (err < 0) {
478 /* No need to update quota information. */
479 ext4_warning(inode->i_sb,
480 "couldn't mark inode #%lu dirty (err %d)",
481 inode->i_ino, err);
482 /* Revert all changes: */
483 swap_inode_data(inode, inode_bl);
484 ext4_mark_inode_dirty(handle, inode);
485 goto err_out1;
486 }
487
488 blocks = inode_bl->i_blocks;
489 bytes = inode_bl->i_bytes;
490 inode_bl->i_blocks = inode->i_blocks;
491 inode_bl->i_bytes = inode->i_bytes;
492 err = ext4_mark_inode_dirty(handle, inode_bl);
493 if (err < 0) {
494 /* No need to update quota information. */
495 ext4_warning(inode_bl->i_sb,
496 "couldn't mark inode #%lu dirty (err %d)",
497 inode_bl->i_ino, err);
498 goto revert;
499 }
500
501 /* Bootloader inode should not be counted into quota information. */
502 if (diff > 0)
503 dquot_free_space(inode, diff);
504 else
505 err = dquot_alloc_space(inode, -1 * diff);
506
507 if (err < 0) {
508 revert:
509 /* Revert all changes: */
510 inode_bl->i_blocks = blocks;
511 inode_bl->i_bytes = bytes;
512 swap_inode_data(inode, inode_bl);
513 ext4_mark_inode_dirty(handle, inode);
514 ext4_mark_inode_dirty(handle, inode_bl);
515 }
516
517 err_out1:
518 ext4_journal_stop(handle);
519 ext4_double_up_write_data_sem(inode, inode_bl);
520
521 err_out:
522 filemap_invalidate_unlock(inode->i_mapping);
523 journal_err_out:
524 unlock_two_nondirectories(inode, inode_bl);
525 iput(inode_bl);
526 return err;
527 }
528
529 /*
530 * If immutable is set and we are not clearing it, we're not allowed to change
531 * anything else in the inode. Don't error out if we're only trying to set
532 * immutable on an immutable file.
533 */
static int ext4_ioctl_check_immutable(struct inode *inode, __u32 new_projid,
				      unsigned int flags)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	unsigned int cur = ei->i_flags;

	/*
	 * The restriction only applies when the inode is immutable both
	 * before and after the change; clearing immutable (or setting it
	 * on a non-immutable inode) is always fine here.
	 */
	if (!(cur & flags & EXT4_IMMUTABLE_FL))
		return 0;

	/* While immutable, no other flag bit may change... */
	if ((cur ^ flags) & ~EXT4_IMMUTABLE_FL)
		return -EPERM;

	/* ...and neither may the project id. */
	if (ext4_has_feature_project(inode->i_sb) &&
	    __kprojid_val(ei->i_projid) != new_projid)
		return -EPERM;

	return 0;
}
551
/*
 * If the per-inode DAX flag is about to change, mark the inode
 * "dontcache" so it can be re-instantiated with the new S_DAX state.
 * Skipped for directories and when a dax=never/dax=always mount option
 * fixes the DAX mode globally.
 */
static void ext4_dax_dontcache(struct inode *inode, unsigned int flags)
{
	struct ext4_inode_info *ei = EXT4_I(inode);

	if (S_ISDIR(inode->i_mode))
		return;

	/* Mount-wide DAX policy overrides the per-inode flag. */
	if (test_opt2(inode->i_sb, DAX_NEVER) ||
	    test_opt(inode->i_sb, DAX_ALWAYS))
		return;

	if ((ei->i_flags ^ flags) & EXT4_DAX_FL)
		d_mark_dontcache(inode);
}
566
dax_compatible(struct inode * inode,unsigned int oldflags,unsigned int flags)567 static bool dax_compatible(struct inode *inode, unsigned int oldflags,
568 unsigned int flags)
569 {
570 /* Allow the DAX flag to be changed on inline directories */
571 if (S_ISDIR(inode->i_mode)) {
572 flags &= ~EXT4_INLINE_DATA_FL;
573 oldflags &= ~EXT4_INLINE_DATA_FL;
574 }
575
576 if (flags & EXT4_DAX_FL) {
577 if ((oldflags & EXT4_DAX_MUT_EXCL) ||
578 ext4_test_inode_state(inode,
579 EXT4_STATE_VERITY_IN_PROGRESS)) {
580 return false;
581 }
582 }
583
584 if ((flags & EXT4_DAX_MUT_EXCL) && (oldflags & EXT4_DAX_FL))
585 return false;
586
587 return true;
588 }
589
/*
 * Apply the new flag set 'flags' to 'inode': validate the change,
 * journal the inode update, and afterwards perform the special
 * handling for EXT4_JOURNAL_DATA_FL and EXT4_EXTENTS_FL (journal-mode
 * change and block-map migration respectively).
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int ext4_ioctl_setflags(struct inode *inode,
			       unsigned int flags)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	handle_t *handle = NULL;
	int err = -EPERM, migrate = 0;
	struct ext4_iloc iloc;
	unsigned int oldflags, mask, i;
	struct super_block *sb = inode->i_sb;

	/* Is it quota file? Do not allow user to mess with it */
	if (ext4_is_quota_file(inode))
		goto flags_out;

	oldflags = ei->i_flags;
	/*
	 * The JOURNAL_DATA flag can only be changed by
	 * the relevant capability.
	 */
	if ((flags ^ oldflags) & (EXT4_JOURNAL_DATA_FL)) {
		if (!capable(CAP_SYS_RESOURCE))
			goto flags_out;
	}

	if (!dax_compatible(inode, oldflags, flags)) {
		err = -EOPNOTSUPP;
		goto flags_out;
	}

	/* Toggling EXTENTS requires converting the inode's block map. */
	if ((flags ^ oldflags) & EXT4_EXTENTS_FL)
		migrate = 1;

	/* CASEFOLD may only be toggled on empty directories. */
	if ((flags ^ oldflags) & EXT4_CASEFOLD_FL) {
		if (!ext4_has_feature_casefold(sb)) {
			err = -EOPNOTSUPP;
			goto flags_out;
		}

		if (!S_ISDIR(inode->i_mode)) {
			err = -ENOTDIR;
			goto flags_out;
		}

		if (!ext4_empty_dir(inode)) {
			err = -ENOTEMPTY;
			goto flags_out;
		}
	}

	/*
	 * Wait for all pending directio and then flush all the dirty pages
	 * for this file. The flush marks all the pages readonly, so any
	 * subsequent attempt to write to the file (particularly mmap pages)
	 * will come through the filesystem and fail.
	 */
	if (S_ISREG(inode->i_mode) && !IS_IMMUTABLE(inode) &&
	    (flags & EXT4_IMMUTABLE_FL)) {
		inode_dio_wait(inode);
		err = filemap_write_and_wait(inode->i_mapping);
		if (err)
			goto flags_out;
	}

	handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		goto flags_out;
	}
	if (IS_SYNC(inode))
		ext4_handle_sync(handle);
	err = ext4_reserve_inode_write(handle, inode, &iloc);
	if (err)
		goto flags_err;

	ext4_dax_dontcache(inode, flags);

	/* Apply each user-modifiable bit individually. */
	for (i = 0, mask = 1; i < 32; i++, mask <<= 1) {
		if (!(mask & EXT4_FL_USER_MODIFIABLE))
			continue;
		/* These flags get special treatment later */
		if (mask == EXT4_JOURNAL_DATA_FL || mask == EXT4_EXTENTS_FL)
			continue;
		if (mask & flags)
			ext4_set_inode_flag(inode, i);
		else
			ext4_clear_inode_flag(inode, i);
	}

	/* Propagate the new ext4 flags into the VFS inode flags. */
	ext4_set_inode_flags(inode, false);

	inode_set_ctime_current(inode);
	inode_inc_iversion(inode);

	err = ext4_mark_iloc_dirty(handle, inode, &iloc);
flags_err:
	ext4_journal_stop(handle);
	if (err)
		goto flags_out;

	if ((flags ^ oldflags) & (EXT4_JOURNAL_DATA_FL)) {
		/*
		 * Changes to the journaling mode can cause unsafe changes to
		 * S_DAX if the inode is DAX
		 */
		if (IS_DAX(inode)) {
			err = -EBUSY;
			goto flags_out;
		}

		err = ext4_change_inode_journal_flag(inode,
						     flags & EXT4_JOURNAL_DATA_FL);
		if (err)
			goto flags_out;
	}
	/* Convert the block map after the flag update is committed. */
	if (migrate) {
		if (flags & EXT4_EXTENTS_FL)
			err = ext4_ext_migrate(inode);
		else
			err = ext4_ind_migrate(inode);
	}

flags_out:
	return err;
}
714
715 #ifdef CONFIG_QUOTA
/*
 * Change the project id of @inode to @projid, transferring the inode's
 * disk usage to the new project quota. Requires the 'project' feature
 * and an inode large enough to carry i_projid.
 *
 * Returns 0 on success (including the no-op case where the project id
 * is unchanged), negative error code otherwise.
 */
static int ext4_ioctl_setproject(struct inode *inode, __u32 projid)
{
	struct super_block *sb = inode->i_sb;
	struct ext4_inode_info *ei = EXT4_I(inode);
	int err, rc;
	handle_t *handle;
	kprojid_t kprojid;
	struct ext4_iloc iloc;
	struct ext4_inode *raw_inode;
	struct dquot *transfer_to[MAXQUOTAS] = { };

	if (!ext4_has_feature_project(sb)) {
		/* Without the feature, only the default project id works. */
		if (projid != EXT4_DEF_PROJID)
			return -EOPNOTSUPP;
		else
			return 0;
	}

	if (EXT4_INODE_SIZE(sb) <= EXT4_GOOD_OLD_INODE_SIZE)
		return -EOPNOTSUPP;

	kprojid = make_kprojid(&init_user_ns, (projid_t)projid);

	if (projid_eq(kprojid, EXT4_I(inode)->i_projid))
		return 0;

	err = -EPERM;
	/* Is it quota file? Do not allow user to mess with it */
	if (ext4_is_quota_file(inode))
		return err;

	err = dquot_initialize(inode);
	if (err)
		return err;

	err = ext4_get_inode_loc(inode, &iloc);
	if (err)
		return err;

	/* Make sure the on-disk inode actually has room for i_projid. */
	raw_inode = ext4_raw_inode(&iloc);
	if (!EXT4_FITS_IN_INODE(raw_inode, ei, i_projid)) {
		err = ext4_expand_extra_isize(inode,
					      EXT4_SB(sb)->s_want_extra_isize,
					      &iloc);
		if (err)
			return err;
	} else {
		brelse(iloc.bh);
	}

	handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
				    EXT4_QUOTA_INIT_BLOCKS(sb) +
				    EXT4_QUOTA_DEL_BLOCKS(sb) + 3);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	err = ext4_reserve_inode_write(handle, inode, &iloc);
	if (err)
		goto out_stop;

	transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
	if (!IS_ERR(transfer_to[PRJQUOTA])) {

		/* __dquot_transfer() calls back ext4_get_inode_usage() which
		 * counts xattr inode references.
		 */
		down_read(&EXT4_I(inode)->xattr_sem);
		err = __dquot_transfer(inode, transfer_to);
		up_read(&EXT4_I(inode)->xattr_sem);
		dqput(transfer_to[PRJQUOTA]);
		if (err)
			goto out_dirty;
	}

	EXT4_I(inode)->i_projid = kprojid;
	inode_set_ctime_current(inode);
	inode_inc_iversion(inode);
out_dirty:
	rc = ext4_mark_iloc_dirty(handle, inode, &iloc);
	if (!err)
		err = rc;
out_stop:
	ext4_journal_stop(handle);
	return err;
}
801 #else
/* Without quota support only the default project id is accepted. */
static int ext4_ioctl_setproject(struct inode *inode, __u32 projid)
{
	return (projid == EXT4_DEF_PROJID) ? 0 : -EOPNOTSUPP;
}
808 #endif
809
/*
 * Force-shutdown the filesystem according to one of the
 * EXT4_GOING_FLAGS_* policies:
 *  - DEFAULT: freeze/thaw the block device so in-flight data reaches disk;
 *  - LOGFLUSH: commit the journal, then abort it;
 *  - NOLOGFLUSH: abort the journal immediately.
 * Idempotent: returns 0 if the filesystem is already shut down.
 */
int ext4_force_shutdown(struct super_block *sb, u32 flags)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int ret;

	if (flags > EXT4_GOING_FLAGS_NOLOGFLUSH)
		return -EINVAL;

	if (ext4_forced_shutdown(sb))
		return 0;

	ext4_msg(sb, KERN_ALERT, "shut down requested (%d)", flags);
	trace_ext4_shutdown(sb, flags);

	switch (flags) {
	case EXT4_GOING_FLAGS_DEFAULT:
		ret = bdev_freeze(sb->s_bdev);
		if (ret)
			return ret;
		set_bit(EXT4_FLAGS_SHUTDOWN, &sbi->s_ext4_flags);
		bdev_thaw(sb->s_bdev);
		break;
	case EXT4_GOING_FLAGS_LOGFLUSH:
		set_bit(EXT4_FLAGS_SHUTDOWN, &sbi->s_ext4_flags);
		if (sbi->s_journal && !is_journal_aborted(sbi->s_journal)) {
			(void) ext4_force_commit(sb);
			jbd2_journal_abort(sbi->s_journal, -ESHUTDOWN);
		}
		break;
	case EXT4_GOING_FLAGS_NOLOGFLUSH:
		set_bit(EXT4_FLAGS_SHUTDOWN, &sbi->s_ext4_flags);
		if (sbi->s_journal && !is_journal_aborted(sbi->s_journal))
			jbd2_journal_abort(sbi->s_journal, -ESHUTDOWN);
		break;
	default:
		return -EINVAL;
	}
	/* No further writes happen; discarding freed blocks is pointless. */
	clear_opt(sb, DISCARD);
	fserror_report_shutdown(sb, GFP_KERNEL);
	return 0;
}
851
/*
 * EXT4_IOC_SHUTDOWN handler: fetch the EXT4_GOING_FLAGS_* value from
 * userspace and force-shutdown the filesystem. Admin-only.
 */
static int ext4_ioctl_shutdown(struct super_block *sb, unsigned long arg)
{
	u32 flags;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(flags, (__u32 __user *)arg))
		return -EFAULT;

	return ext4_force_shutdown(sb, flags);
}
864
/* State shared between ext4_ioc_getfsmap() and its per-record callback. */
struct getfsmap_info {
	struct super_block	*gi_sb;		/* filesystem being queried */
	struct fsmap_head __user *gi_data;	/* userspace destination buffer */
	unsigned int		gi_idx;		/* next fmh_recs slot to fill */
	__u32			gi_last_flags;	/* flags of last emitted record */
};
871
/* Copy one internal fsmap record into the next userspace record slot. */
static int ext4_getfsmap_format(struct ext4_fsmap *xfm, void *priv)
{
	struct getfsmap_info *info = priv;
	struct fsmap fm;

	trace_ext4_getfsmap_mapping(info->gi_sb, xfm);

	/* Remember the flags so the caller can set FMR_OF_LAST later. */
	info->gi_last_flags = xfm->fmr_flags;
	ext4_fsmap_from_internal(info->gi_sb, &fm, xfm);
	if (copy_to_user(&info->gi_data->fmh_recs[info->gi_idx++], &fm,
			sizeof(struct fsmap)))
		return -EFAULT;

	return 0;
}
887
/*
 * EXT4_IOC_GETFSMAP handler: validate the userspace fsmap_head, run the
 * mapping query, copy the records back, mark the final record with
 * FMR_OF_LAST, and return the updated header.
 */
static int ext4_ioc_getfsmap(struct super_block *sb,
			     struct fsmap_head __user *arg)
{
	struct getfsmap_info info = { NULL };
	struct ext4_fsmap_head xhead = {0};
	struct fsmap_head head;
	bool aborted = false;
	int error;

	if (copy_from_user(&head, arg, sizeof(struct fsmap_head)))
		return -EFAULT;
	/* All reserved fields must be zero. */
	if (memchr_inv(head.fmh_reserved, 0, sizeof(head.fmh_reserved)) ||
	    memchr_inv(head.fmh_keys[0].fmr_reserved, 0,
		       sizeof(head.fmh_keys[0].fmr_reserved)) ||
	    memchr_inv(head.fmh_keys[1].fmr_reserved, 0,
		       sizeof(head.fmh_keys[1].fmr_reserved)))
		return -EINVAL;
	/*
	 * ext4 doesn't report file extents at all, so the only valid
	 * file offsets are the magic ones (all zeroes or all ones).
	 */
	if (head.fmh_keys[0].fmr_offset ||
	    (head.fmh_keys[1].fmr_offset != 0 &&
	     head.fmh_keys[1].fmr_offset != -1ULL))
		return -EINVAL;

	xhead.fmh_iflags = head.fmh_iflags;
	xhead.fmh_count = head.fmh_count;
	ext4_fsmap_to_internal(sb, &xhead.fmh_keys[0], &head.fmh_keys[0]);
	ext4_fsmap_to_internal(sb, &xhead.fmh_keys[1], &head.fmh_keys[1]);

	trace_ext4_getfsmap_low_key(sb, &xhead.fmh_keys[0]);
	trace_ext4_getfsmap_high_key(sb, &xhead.fmh_keys[1]);

	info.gi_sb = sb;
	info.gi_data = arg;
	error = ext4_getfsmap(sb, &xhead, ext4_getfsmap_format, &info);
	if (error == EXT4_QUERY_RANGE_ABORT)
		aborted = true;
	else if (error)
		return error;

	/* If we didn't abort, set the "last" flag in the last fmx */
	if (!aborted && info.gi_idx) {
		info.gi_last_flags |= FMR_OF_LAST;
		if (copy_to_user(&info.gi_data->fmh_recs[info.gi_idx - 1].fmr_flags,
				 &info.gi_last_flags,
				 sizeof(info.gi_last_flags)))
			return -EFAULT;
	}

	/* copy back header */
	head.fmh_entries = xhead.fmh_entries;
	head.fmh_oflags = xhead.fmh_oflags;
	if (copy_to_user(arg, &head, sizeof(struct fsmap_head)))
		return -EFAULT;

	return 0;
}
947
/*
 * EXT4_IOC_GROUP_ADD handler: add one new block group described by
 * 'input' to the filesystem (online resize), flushing the journal so
 * the resize is durable before returning.
 */
static long ext4_ioctl_group_add(struct file *file,
				 struct ext4_new_group_data *input)
{
	struct super_block *sb = file_inode(file)->i_sb;
	int err, err2=0;

	/* Serialize against any other ongoing resize. */
	err = ext4_resize_begin(sb);
	if (err)
		return err;

	if (ext4_has_feature_bigalloc(sb)) {
		ext4_msg(sb, KERN_ERR,
			 "Online resizing not supported with bigalloc");
		err = -EOPNOTSUPP;
		goto group_add_out;
	}

	err = mnt_want_write_file(file);
	if (err)
		goto group_add_out;

	err = ext4_group_add(sb, input);
	if (EXT4_SB(sb)->s_journal) {
		/* Resize cannot be replayed by fast commit; flush journal. */
		ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_RESIZE, NULL);
		jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
		err2 = jbd2_journal_flush(EXT4_SB(sb)->s_journal, 0);
		jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
	}
	if (err == 0)
		err = err2;
	mnt_drop_write_file(file);
	/* Queue lazy init of the new group's inode table if enabled. */
	if (!err && ext4_has_group_desc_csum(sb) &&
	    test_opt(sb, INIT_INODE_TABLE))
		err = ext4_register_li_request(sb, input->group);
group_add_out:
	err2 = ext4_resize_end(sb, false);
	if (err == 0)
		err = err2;
	return err;
}
988
/*
 * Report the inode's user-visible flags (and project id when the
 * 'project' feature is enabled) for FS_IOC_GETFLAGS/FSGETXATTR.
 * Always returns 0.
 */
int ext4_fileattr_get(struct dentry *dentry, struct file_kattr *fa)
{
	struct inode *inode = d_inode(dentry);
	struct ext4_inode_info *ei = EXT4_I(inode);
	u32 flags = ei->i_flags & EXT4_FL_USER_VISIBLE;

	/* Hide PROJINHERIT on regular files; it is only shown on dirs. */
	if (S_ISREG(inode->i_mode))
		flags &= ~FS_PROJINHERIT_FL;

	fileattr_fill_flags(fa, flags);
	if (ext4_has_feature_project(inode->i_sb))
		fa->fsx_projid = from_kprojid(&init_user_ns, ei->i_projid);

	return 0;
}
1004
/*
 * Apply userspace-requested inode flags and project id
 * (FS_IOC_SETFLAGS/FSSETXATTR): mask to the modifiable set, enforce
 * the immutable-file restrictions, then update flags and project id.
 */
int ext4_fileattr_set(struct mnt_idmap *idmap,
		      struct dentry *dentry, struct file_kattr *fa)
{
	struct inode *inode = d_inode(dentry);
	u32 flags = fa->flags;
	int err = -EOPNOTSUPP;

	if (flags & ~EXT4_FL_USER_VISIBLE)
		goto out;

	/*
	 * chattr(1) grabs flags via GETFLAGS, modifies the result and
	 * passes that to SETFLAGS. So we cannot easily make SETFLAGS
	 * more restrictive than just silently masking off visible but
	 * not settable flags as we always did.
	 */
	flags &= EXT4_FL_USER_MODIFIABLE;
	if (ext4_mask_flags(inode->i_mode, flags) != flags)
		goto out;
	err = ext4_ioctl_check_immutable(inode, fa->fsx_projid, flags);
	if (err)
		goto out;
	err = ext4_ioctl_setflags(inode, flags);
	if (err)
		goto out;
	err = ext4_ioctl_setproject(inode, fa->fsx_projid);
out:
	return err;
}
1034
/* So that the fiemap access checks can't overflow on 32 bit machines. */
#define FIEMAP_MAX_EXTENTS	(UINT_MAX / sizeof(struct fiemap_extent))

/*
 * EXT4_IOC_GET_ES_CACHE handler: fill a userspace struct fiemap with
 * the inode's extent status cache contents (debug interface).
 */
static int ext4_ioctl_get_es_cache(struct file *filp, unsigned long arg)
{
	struct fiemap fiemap;
	struct fiemap __user *ufiemap = (struct fiemap __user *) arg;
	struct fiemap_extent_info fieinfo = { 0, };
	struct inode *inode = file_inode(filp);
	int error;

	if (copy_from_user(&fiemap, ufiemap, sizeof(fiemap)))
		return -EFAULT;

	if (fiemap.fm_extent_count > FIEMAP_MAX_EXTENTS)
		return -EINVAL;

	fieinfo.fi_flags = fiemap.fm_flags;
	fieinfo.fi_extents_max = fiemap.fm_extent_count;
	fieinfo.fi_extents_start = ufiemap->fm_extents;

	error = ext4_get_es_cache(inode, &fieinfo, fiemap.fm_start,
				  fiemap.fm_length);
	/* Write back how many extents were actually returned. */
	fiemap.fm_flags = fieinfo.fi_flags;
	fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
	if (copy_to_user(ufiemap, &fiemap, sizeof(fiemap)))
		error = -EFAULT;

	return error;
}
1065
/*
 * EXT4_IOC_CHECKPOINT handler: checkpoint the journal and optionally
 * discard or zero-out the journal blocks afterwards. Admin-only.
 */
static int ext4_ioctl_checkpoint(struct file *filp, unsigned long arg)
{
	int err = 0;
	__u32 flags = 0;
	unsigned int flush_flags = 0;
	struct super_block *sb = file_inode(filp)->i_sb;

	if (copy_from_user(&flags, (__u32 __user *)arg,
				sizeof(__u32)))
		return -EFAULT;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* check for invalid bits set */
	if ((flags & ~EXT4_IOC_CHECKPOINT_FLAG_VALID) ||
				((flags & JBD2_JOURNAL_FLUSH_DISCARD) &&
				(flags & JBD2_JOURNAL_FLUSH_ZEROOUT)))
		return -EINVAL;

	if (!EXT4_SB(sb)->s_journal)
		return -ENODEV;

	/* DISCARD needs discard support on the journal device. */
	if ((flags & JBD2_JOURNAL_FLUSH_DISCARD) &&
	    !bdev_max_discard_sectors(EXT4_SB(sb)->s_journal->j_dev))
		return -EOPNOTSUPP;

	/* Dry run: only the validations above are performed. */
	if (flags & EXT4_IOC_CHECKPOINT_FLAG_DRY_RUN)
		return 0;

	if (flags & EXT4_IOC_CHECKPOINT_FLAG_DISCARD)
		flush_flags |= JBD2_JOURNAL_FLUSH_DISCARD;

	if (flags & EXT4_IOC_CHECKPOINT_FLAG_ZEROOUT) {
		flush_flags |= JBD2_JOURNAL_FLUSH_ZEROOUT;
		pr_info_ratelimited("warning: checkpointing journal with EXT4_IOC_CHECKPOINT_FLAG_ZEROOUT can be slow");
	}

	jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
	err = jbd2_journal_flush(EXT4_SB(sb)->s_journal, flush_flags);
	jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);

	return err;
}
1110
/*
 * FS_IOC_SETFSLABEL: set the filesystem volume label from a userspace
 * string.  Requires CAP_SYS_ADMIN; the label must fit in EXT4_LABEL_MAX
 * bytes (excluding the terminating NUL).
 */
static int ext4_ioctl_setlabel(struct file *filp, const char __user *user_label)
{
	size_t len;
	int ret = 0;
	char new_label[EXT4_LABEL_MAX + 1];
	struct super_block *sb = file_inode(filp)->i_sb;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/*
	 * Copy the maximum length allowed for ext4 label with one more to
	 * find the required terminating null byte in order to test the
	 * label length. The on disk label doesn't need to be null terminated.
	 */
	if (copy_from_user(new_label, user_label, EXT4_LABEL_MAX + 1))
		return -EFAULT;

	len = strnlen(new_label, EXT4_LABEL_MAX + 1);
	if (len > EXT4_LABEL_MAX)
		return -EINVAL;

	/*
	 * Clear the buffer after the new label so no stale userspace bytes
	 * are written into the on-disk superblock.
	 */
	memset(new_label + len, 0, EXT4_LABEL_MAX - len);

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	/* Propagate the new label to the primary and backup superblocks. */
	ret = ext4_update_superblocks_fn(sb, ext4_sb_setlabel, new_label);

	mnt_drop_write_file(filp);
	return ret;
}
1147
/*
 * FS_IOC_GETFSLABEL: copy the on-disk volume label to userspace as a
 * NUL-terminated string.
 */
static int ext4_ioctl_getlabel(struct ext4_sb_info *sbi, char __user *user_label)
{
	char buf[EXT4_LABEL_MAX + 1];

	/*
	 * EXT4_LABEL_MAX must always be smaller than FSLABEL_MAX because
	 * FSLABEL_MAX must include terminating null byte, while s_volume_name
	 * does not have to.
	 */
	BUILD_BUG_ON(EXT4_LABEL_MAX >= FSLABEL_MAX);

	/* Snapshot the label under the superblock buffer lock. */
	lock_buffer(sbi->s_sbh);
	memtostr_pad(buf, sbi->s_es->s_volume_name);
	unlock_buffer(sbi->s_sbh);

	return copy_to_user(user_label, buf, sizeof(buf)) ? -EFAULT : 0;
}
1167
/*
 * EXT4_IOC_GETFSUUID: copy the filesystem UUID from the superblock to
 * userspace via struct fsuuid.
 *
 * A zero fsu_len is a length query: the required size (UUID_SIZE) is
 * written back and 0 returned without copying the UUID itself.
 */
static int ext4_ioctl_getuuid(struct ext4_sb_info *sbi,
			struct fsuuid __user *ufsuuid)
{
	struct fsuuid fsuuid;
	__u8 uuid[UUID_SIZE];

	if (copy_from_user(&fsuuid, ufsuuid, sizeof(fsuuid)))
		return -EFAULT;

	if (fsuuid.fsu_len == 0) {
		/* Caller is asking how big the buffer needs to be. */
		fsuuid.fsu_len = UUID_SIZE;
		if (copy_to_user(&ufsuuid->fsu_len, &fsuuid.fsu_len,
					sizeof(fsuuid.fsu_len)))
			return -EFAULT;
		return 0;
	}

	if (fsuuid.fsu_len < UUID_SIZE || fsuuid.fsu_flags != 0)
		return -EINVAL;

	/* Snapshot the UUID under the superblock buffer lock. */
	lock_buffer(sbi->s_sbh);
	memcpy(uuid, sbi->s_es->s_uuid, UUID_SIZE);
	unlock_buffer(sbi->s_sbh);

	fsuuid.fsu_len = UUID_SIZE;
	if (copy_to_user(ufsuuid, &fsuuid, sizeof(fsuuid)) ||
	    copy_to_user(&ufsuuid->fsu_uuid[0], uuid, UUID_SIZE))
		return -EFAULT;
	return 0;
}
1198
/*
 * EXT4_IOC_SETFSUUID: set the filesystem UUID from a userspace
 * struct fsuuid.  Requires CAP_SYS_ADMIN.
 *
 * Refused when metadata checksums are seeded from the UUID (no
 * csum_seed feature) or when stable_inodes pins the UUID.
 */
static int ext4_ioctl_setuuid(struct file *filp,
			const struct fsuuid __user *ufsuuid)
{
	int ret = 0;
	struct super_block *sb = file_inode(filp)->i_sb;
	struct fsuuid fsuuid;
	__u8 uuid[UUID_SIZE];

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/*
	 * If any checksums (group descriptors or metadata) are being used
	 * then the checksum seed feature is required to change the UUID.
	 */
	if (((ext4_has_feature_gdt_csum(sb) ||
	      ext4_has_feature_metadata_csum(sb))
			&& !ext4_has_feature_csum_seed(sb))
		|| ext4_has_feature_stable_inodes(sb))
		return -EOPNOTSUPP;

	if (copy_from_user(&fsuuid, ufsuuid, sizeof(fsuuid)))
		return -EFAULT;

	/* Exactly UUID_SIZE bytes and no flags are accepted. */
	if (fsuuid.fsu_len != UUID_SIZE || fsuuid.fsu_flags != 0)
		return -EINVAL;

	if (copy_from_user(uuid, &ufsuuid->fsu_uuid[0], UUID_SIZE))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	/* Propagate the new UUID to the primary and backup superblocks. */
	ret = ext4_update_superblocks_fn(sb, ext4_sb_setuuid, &uuid);
	mnt_drop_write_file(filp);

	return ret;
}
1238
1239
1240 #define TUNE_OPS_SUPPORTED (EXT4_TUNE_FL_ERRORS_BEHAVIOR | \
1241 EXT4_TUNE_FL_MNT_COUNT | EXT4_TUNE_FL_MAX_MNT_COUNT | \
1242 EXT4_TUNE_FL_CHECKINTRVAL | EXT4_TUNE_FL_LAST_CHECK_TIME | \
1243 EXT4_TUNE_FL_RESERVED_BLOCKS | EXT4_TUNE_FL_RESERVED_UID | \
1244 EXT4_TUNE_FL_RESERVED_GID | EXT4_TUNE_FL_DEFAULT_MNT_OPTS | \
1245 EXT4_TUNE_FL_DEF_HASH_ALG | EXT4_TUNE_FL_RAID_STRIDE | \
1246 EXT4_TUNE_FL_RAID_STRIPE_WIDTH | EXT4_TUNE_FL_MOUNT_OPTS | \
1247 EXT4_TUNE_FL_FEATURES | EXT4_TUNE_FL_EDIT_FEATURES | \
1248 EXT4_TUNE_FL_FORCE_FSCK | EXT4_TUNE_FL_ENCODING | \
1249 EXT4_TUNE_FL_ENCODING_FLAGS)
1250
1251 #define EXT4_TUNE_SET_COMPAT_SUPP \
1252 (EXT4_FEATURE_COMPAT_DIR_INDEX | \
1253 EXT4_FEATURE_COMPAT_STABLE_INODES)
1254 #define EXT4_TUNE_SET_INCOMPAT_SUPP \
1255 (EXT4_FEATURE_INCOMPAT_EXTENTS | \
1256 EXT4_FEATURE_INCOMPAT_EA_INODE | \
1257 EXT4_FEATURE_INCOMPAT_ENCRYPT | \
1258 EXT4_FEATURE_INCOMPAT_CSUM_SEED | \
1259 EXT4_FEATURE_INCOMPAT_LARGEDIR | \
1260 EXT4_FEATURE_INCOMPAT_CASEFOLD)
1261 #define EXT4_TUNE_SET_RO_COMPAT_SUPP \
1262 (EXT4_FEATURE_RO_COMPAT_LARGE_FILE | \
1263 EXT4_FEATURE_RO_COMPAT_DIR_NLINK | \
1264 EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE | \
1265 EXT4_FEATURE_RO_COMPAT_PROJECT | \
1266 EXT4_FEATURE_RO_COMPAT_VERITY)
1267
1268 #define EXT4_TUNE_CLEAR_COMPAT_SUPP (0)
1269 #define EXT4_TUNE_CLEAR_INCOMPAT_SUPP (0)
1270 #define EXT4_TUNE_CLEAR_RO_COMPAT_SUPP (0)
1271
1272 #define SB_ENC_SUPP_MASK (SB_ENC_STRICT_MODE_FL | \
1273 SB_ENC_NO_COMPAT_FALLBACK_FL)
1274
/*
 * EXT4_IOC_GET_TUNE_SB_PARAM: report the current tunable superblock
 * parameters, plus the masks of flags/features this kernel supports
 * setting or clearing via EXT4_IOC_SET_TUNE_SB_PARAM.
 *
 * NOTE(review): the superblock fields are read here without
 * lock_buffer(sbi->s_sbh), unlike ext4_ioctl_getlabel()/getuuid() —
 * confirm a torn read against a concurrent update is acceptable.
 */
static int ext4_ioctl_get_tune_sb(struct ext4_sb_info *sbi,
				  struct ext4_tune_sb_params __user *params)
{
	struct ext4_tune_sb_params ret;
	struct ext4_super_block *es = sbi->s_es;

	memset(&ret, 0, sizeof(ret));
	/* Advertise every tunable this kernel understands. */
	ret.set_flags = TUNE_OPS_SUPPORTED;
	ret.errors_behavior = le16_to_cpu(es->s_errors);
	ret.mnt_count = le16_to_cpu(es->s_mnt_count);
	ret.max_mnt_count = le16_to_cpu(es->s_max_mnt_count);
	ret.checkinterval = le32_to_cpu(es->s_checkinterval);
	ret.last_check_time = le32_to_cpu(es->s_lastcheck);
	ret.reserved_blocks = ext4_r_blocks_count(es);
	ret.blocks_count = ext4_blocks_count(es);
	ret.reserved_uid = ext4_get_resuid(es);
	ret.reserved_gid = ext4_get_resgid(es);
	ret.default_mnt_opts = le32_to_cpu(es->s_default_mount_opts);
	ret.def_hash_alg = es->s_def_hash_version;
	ret.raid_stride = le16_to_cpu(es->s_raid_stride);
	ret.raid_stripe_width = le32_to_cpu(es->s_raid_stripe_width);
	ret.encoding = le16_to_cpu(es->s_encoding);
	ret.encoding_flags = le16_to_cpu(es->s_encoding_flags);
	strscpy_pad(ret.mount_opts, es->s_mount_opts);
	ret.feature_compat = le32_to_cpu(es->s_feature_compat);
	ret.feature_incompat = le32_to_cpu(es->s_feature_incompat);
	ret.feature_ro_compat = le32_to_cpu(es->s_feature_ro_compat);
	/* Which feature bits the SET ioctl may set or clear. */
	ret.set_feature_compat_mask = EXT4_TUNE_SET_COMPAT_SUPP;
	ret.set_feature_incompat_mask = EXT4_TUNE_SET_INCOMPAT_SUPP;
	ret.set_feature_ro_compat_mask = EXT4_TUNE_SET_RO_COMPAT_SUPP;
	ret.clear_feature_compat_mask = EXT4_TUNE_CLEAR_COMPAT_SUPP;
	ret.clear_feature_incompat_mask = EXT4_TUNE_CLEAR_INCOMPAT_SUPP;
	ret.clear_feature_ro_compat_mask = EXT4_TUNE_CLEAR_RO_COMPAT_SUPP;
	if (copy_to_user(params, &ret, sizeof(ret)))
		return -EFAULT;
	return 0;
}
1312
ext4_sb_setparams(struct ext4_sb_info * sbi,struct ext4_super_block * es,const void * arg)1313 static void ext4_sb_setparams(struct ext4_sb_info *sbi,
1314 struct ext4_super_block *es, const void *arg)
1315 {
1316 const struct ext4_tune_sb_params *params = arg;
1317
1318 if (params->set_flags & EXT4_TUNE_FL_ERRORS_BEHAVIOR)
1319 es->s_errors = cpu_to_le16(params->errors_behavior);
1320 if (params->set_flags & EXT4_TUNE_FL_MNT_COUNT)
1321 es->s_mnt_count = cpu_to_le16(params->mnt_count);
1322 if (params->set_flags & EXT4_TUNE_FL_MAX_MNT_COUNT)
1323 es->s_max_mnt_count = cpu_to_le16(params->max_mnt_count);
1324 if (params->set_flags & EXT4_TUNE_FL_CHECKINTRVAL)
1325 es->s_checkinterval = cpu_to_le32(params->checkinterval);
1326 if (params->set_flags & EXT4_TUNE_FL_LAST_CHECK_TIME)
1327 es->s_lastcheck = cpu_to_le32(params->last_check_time);
1328 if (params->set_flags & EXT4_TUNE_FL_RESERVED_BLOCKS) {
1329 ext4_fsblk_t blk = params->reserved_blocks;
1330
1331 es->s_r_blocks_count_lo = cpu_to_le32((u32)blk);
1332 es->s_r_blocks_count_hi = cpu_to_le32(blk >> 32);
1333 }
1334 if (params->set_flags & EXT4_TUNE_FL_RESERVED_UID) {
1335 int uid = params->reserved_uid;
1336
1337 es->s_def_resuid = cpu_to_le16(uid & 0xFFFF);
1338 es->s_def_resuid_hi = cpu_to_le16(uid >> 16);
1339 }
1340 if (params->set_flags & EXT4_TUNE_FL_RESERVED_GID) {
1341 int gid = params->reserved_gid;
1342
1343 es->s_def_resgid = cpu_to_le16(gid & 0xFFFF);
1344 es->s_def_resgid_hi = cpu_to_le16(gid >> 16);
1345 }
1346 if (params->set_flags & EXT4_TUNE_FL_DEFAULT_MNT_OPTS)
1347 es->s_default_mount_opts = cpu_to_le32(params->default_mnt_opts);
1348 if (params->set_flags & EXT4_TUNE_FL_DEF_HASH_ALG)
1349 es->s_def_hash_version = params->def_hash_alg;
1350 if (params->set_flags & EXT4_TUNE_FL_RAID_STRIDE)
1351 es->s_raid_stride = cpu_to_le16(params->raid_stride);
1352 if (params->set_flags & EXT4_TUNE_FL_RAID_STRIPE_WIDTH)
1353 es->s_raid_stripe_width =
1354 cpu_to_le32(params->raid_stripe_width);
1355 if (params->set_flags & EXT4_TUNE_FL_ENCODING)
1356 es->s_encoding = cpu_to_le16(params->encoding);
1357 if (params->set_flags & EXT4_TUNE_FL_ENCODING_FLAGS)
1358 es->s_encoding_flags = cpu_to_le16(params->encoding_flags);
1359 strscpy_pad(es->s_mount_opts, params->mount_opts);
1360 if (params->set_flags & EXT4_TUNE_FL_EDIT_FEATURES) {
1361 es->s_feature_compat |=
1362 cpu_to_le32(params->set_feature_compat_mask);
1363 es->s_feature_incompat |=
1364 cpu_to_le32(params->set_feature_incompat_mask);
1365 es->s_feature_ro_compat |=
1366 cpu_to_le32(params->set_feature_ro_compat_mask);
1367 es->s_feature_compat &=
1368 ~cpu_to_le32(params->clear_feature_compat_mask);
1369 es->s_feature_incompat &=
1370 ~cpu_to_le32(params->clear_feature_incompat_mask);
1371 es->s_feature_ro_compat &=
1372 ~cpu_to_le32(params->clear_feature_ro_compat_mask);
1373 if (params->set_feature_compat_mask &
1374 EXT4_FEATURE_COMPAT_DIR_INDEX)
1375 es->s_def_hash_version = sbi->s_def_hash_version;
1376 if (params->set_feature_incompat_mask &
1377 EXT4_FEATURE_INCOMPAT_CSUM_SEED)
1378 es->s_checksum_seed = cpu_to_le32(sbi->s_csum_seed);
1379 }
1380 if (params->set_flags & EXT4_TUNE_FL_FORCE_FSCK)
1381 es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
1382 }
1383
/*
 * EXT4_IOC_SET_TUNE_SB_PARAM: validate a userspace ext4_tune_sb_params
 * request and apply it to all superblocks via ext4_sb_setparams().
 * Requires CAP_SYS_ADMIN.
 *
 * EXT4_TUNE_FL_FEATURES (absolute feature words) is converted into the
 * equivalent EXT4_TUNE_FL_EDIT_FEATURES set/clear masks so only one
 * code path performs the feature validation and update.
 */
static int ext4_ioctl_set_tune_sb(struct file *filp,
				  struct ext4_tune_sb_params __user *in)
{
	struct ext4_tune_sb_params params;
	struct super_block *sb = file_inode(filp)->i_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	int enabling_casefold = 0;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* note: "&params" restored here (was mojibake "¶ms" in source) */
	if (copy_from_user(&params, in, sizeof(params)))
		return -EFAULT;

	/* mount_opts must be NUL-terminated within the buffer. */
	if (strnlen(params.mount_opts, sizeof(params.mount_opts)) ==
	    sizeof(params.mount_opts))
		return -E2BIG;

	if ((params.set_flags & ~TUNE_OPS_SUPPORTED) != 0)
		return -EOPNOTSUPP;

	if ((params.set_flags & EXT4_TUNE_FL_ERRORS_BEHAVIOR) &&
	    (params.errors_behavior > EXT4_ERRORS_PANIC))
		return -EINVAL;

	/* Cap reserved blocks at half the filesystem. */
	if ((params.set_flags & EXT4_TUNE_FL_RESERVED_BLOCKS) &&
	    (params.reserved_blocks > ext4_blocks_count(sbi->s_es) / 2))
		return -EINVAL;
	if ((params.set_flags & EXT4_TUNE_FL_DEF_HASH_ALG) &&
	    ((params.def_hash_alg > DX_HASH_LAST) ||
	     (params.def_hash_alg == DX_HASH_SIPHASH)))
		return -EINVAL;
	/* Absolute and incremental feature updates are mutually exclusive. */
	if ((params.set_flags & EXT4_TUNE_FL_FEATURES) &&
	    (params.set_flags & EXT4_TUNE_FL_EDIT_FEATURES))
		return -EINVAL;

	if (params.set_flags & EXT4_TUNE_FL_FEATURES) {
		/* Convert absolute feature words into set/clear masks. */
		params.set_feature_compat_mask =
			params.feature_compat &
			~le32_to_cpu(es->s_feature_compat);
		params.set_feature_incompat_mask =
			params.feature_incompat &
			~le32_to_cpu(es->s_feature_incompat);
		params.set_feature_ro_compat_mask =
			params.feature_ro_compat &
			~le32_to_cpu(es->s_feature_ro_compat);
		params.clear_feature_compat_mask =
			~params.feature_compat &
			le32_to_cpu(es->s_feature_compat);
		params.clear_feature_incompat_mask =
			~params.feature_incompat &
			le32_to_cpu(es->s_feature_incompat);
		params.clear_feature_ro_compat_mask =
			~params.feature_ro_compat &
			le32_to_cpu(es->s_feature_ro_compat);
		params.set_flags |= EXT4_TUNE_FL_EDIT_FEATURES;
	}
	if (params.set_flags & EXT4_TUNE_FL_EDIT_FEATURES) {
		if ((params.set_feature_compat_mask &
		     ~EXT4_TUNE_SET_COMPAT_SUPP) ||
		    (params.set_feature_incompat_mask &
		     ~EXT4_TUNE_SET_INCOMPAT_SUPP) ||
		    (params.set_feature_ro_compat_mask &
		     ~EXT4_TUNE_SET_RO_COMPAT_SUPP) ||
		    (params.clear_feature_compat_mask &
		     ~EXT4_TUNE_CLEAR_COMPAT_SUPP) ||
		    (params.clear_feature_incompat_mask &
		     ~EXT4_TUNE_CLEAR_INCOMPAT_SUPP) ||
		    (params.clear_feature_ro_compat_mask &
		     ~EXT4_TUNE_CLEAR_RO_COMPAT_SUPP))
			return -EOPNOTSUPP;

		/*
		 * Filter out the features that are already set from
		 * the set_mask.
		 */
		params.set_feature_compat_mask &=
			~le32_to_cpu(es->s_feature_compat);
		params.set_feature_incompat_mask &=
			~le32_to_cpu(es->s_feature_incompat);
		params.set_feature_ro_compat_mask &=
			~le32_to_cpu(es->s_feature_ro_compat);
		if ((params.set_feature_incompat_mask &
		     EXT4_FEATURE_INCOMPAT_CASEFOLD)) {
			/* Casefold implies an encoding; default to UTF-8. */
			enabling_casefold = 1;
			if (!(params.set_flags & EXT4_TUNE_FL_ENCODING)) {
				params.encoding = EXT4_ENC_UTF8_12_1;
				params.set_flags |= EXT4_TUNE_FL_ENCODING;
			}
			if (!(params.set_flags & EXT4_TUNE_FL_ENCODING_FLAGS)) {
				params.encoding_flags = 0;
				params.set_flags |= EXT4_TUNE_FL_ENCODING_FLAGS;
			}
		}
		if ((params.set_feature_compat_mask &
		     EXT4_FEATURE_COMPAT_DIR_INDEX)) {
			uuid_t uu;

			/* dir_index needs a hash seed and hash version. */
			memcpy(&uu, sbi->s_hash_seed, UUID_SIZE);
			if (uuid_is_null(&uu))
				generate_random_uuid((char *)
						     &sbi->s_hash_seed);
			if (params.set_flags & EXT4_TUNE_FL_DEF_HASH_ALG)
				sbi->s_def_hash_version = params.def_hash_alg;
			else if (sbi->s_def_hash_version == 0)
				sbi->s_def_hash_version = DX_HASH_HALF_MD4;
			if (!(es->s_flags &
			      cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH)) &&
			    !(es->s_flags &
			      cpu_to_le32(EXT2_FLAGS_SIGNED_HASH))) {
#ifdef __CHAR_UNSIGNED__
				sbi->s_hash_unsigned = 3;
#else
				sbi->s_hash_unsigned = 0;
#endif
			}
		}
	}
	/* Encoding changes are only legal while enabling casefold. */
	if (params.set_flags & EXT4_TUNE_FL_ENCODING) {
		if (!enabling_casefold)
			return -EINVAL;
		if (params.encoding == 0)
			params.encoding = EXT4_ENC_UTF8_12_1;
		else if (params.encoding != EXT4_ENC_UTF8_12_1)
			return -EINVAL;
	}
	if (params.set_flags & EXT4_TUNE_FL_ENCODING_FLAGS) {
		if (!enabling_casefold)
			return -EINVAL;
		if (params.encoding_flags & ~SB_ENC_SUPP_MASK)
			return -EINVAL;
	}

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	/* note: "&params" restored here (was mojibake "¶ms" in source) */
	ret = ext4_update_superblocks_fn(sb, ext4_sb_setparams, &params);
	mnt_drop_write_file(filp);

	/*
	 * NOTE(review): the in-core default hash version is updated even
	 * when the superblock write failed (ret != 0) — confirm intended.
	 */
	if (params.set_flags & EXT4_TUNE_FL_DEF_HASH_ALG)
		sbi->s_def_hash_version = params.def_hash_alg;

	return ret;
}
1531
/*
 * Main ioctl dispatcher for ext4.  Decodes cmd, performs permission and
 * feature checks, and takes/drops write access and locks per command.
 */
static long __ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct super_block *sb = inode->i_sb;
	struct mnt_idmap *idmap = file_mnt_idmap(filp);

	ext4_debug("cmd = %u, arg = %lu\n", cmd, arg);

	switch (cmd) {
	case FS_IOC_GETFSMAP:
		return ext4_ioc_getfsmap(sb, (void __user *)arg);
	case EXT4_IOC_GETVERSION:
	case EXT4_IOC_GETVERSION_OLD:
		return put_user(inode->i_generation, (int __user *) arg);
	case EXT4_IOC_SETVERSION:
	case EXT4_IOC_SETVERSION_OLD: {
		/* Set i_generation under the inode lock, via the journal. */
		handle_t *handle;
		struct ext4_iloc iloc;
		__u32 generation;
		int err;

		if (!inode_owner_or_capable(idmap, inode))
			return -EPERM;

		if (ext4_has_feature_metadata_csum(inode->i_sb)) {
			ext4_warning(sb, "Setting inode version is not "
				     "supported with metadata_csum enabled.");
			return -ENOTTY;
		}

		err = mnt_want_write_file(filp);
		if (err)
			return err;
		if (get_user(generation, (int __user *) arg)) {
			err = -EFAULT;
			goto setversion_out;
		}

		inode_lock(inode);
		handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
		if (IS_ERR(handle)) {
			err = PTR_ERR(handle);
			goto unlock_out;
		}
		err = ext4_reserve_inode_write(handle, inode, &iloc);
		if (err == 0) {
			inode_set_ctime_current(inode);
			inode_inc_iversion(inode);
			inode->i_generation = generation;
			err = ext4_mark_iloc_dirty(handle, inode, &iloc);
		}
		ext4_journal_stop(handle);

unlock_out:
		inode_unlock(inode);
setversion_out:
		mnt_drop_write_file(filp);
		return err;
	}
	case EXT4_IOC_GROUP_EXTEND: {
		/* Grow the last block group to a new total block count. */
		ext4_fsblk_t n_blocks_count;
		int err, err2=0;

		err = ext4_resize_begin(sb);
		if (err)
			return err;

		if (get_user(n_blocks_count, (__u32 __user *)arg)) {
			err = -EFAULT;
			goto group_extend_out;
		}

		if (ext4_has_feature_bigalloc(sb)) {
			ext4_msg(sb, KERN_ERR,
				 "Online resizing not supported with bigalloc");
			err = -EOPNOTSUPP;
			goto group_extend_out;
		}

		err = mnt_want_write_file(filp);
		if (err)
			goto group_extend_out;

		err = ext4_group_extend(sb, EXT4_SB(sb)->s_es, n_blocks_count);
		if (EXT4_SB(sb)->s_journal) {
			/* Resize makes fast commits ineligible; flush journal. */
			ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_RESIZE,
					NULL);
			jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
			err2 = jbd2_journal_flush(EXT4_SB(sb)->s_journal, 0);
			jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
		}
		if (err == 0)
			err = err2;
		mnt_drop_write_file(filp);
group_extend_out:
		err2 = ext4_resize_end(sb, false);
		if (err == 0)
			err = err2;
		return err;
	}

	case EXT4_IOC_MOVE_EXT: {
		/* Defragmentation: swap extents with a donor file. */
		struct move_extent me;
		int err;

		if (!(filp->f_mode & FMODE_READ) ||
		    !(filp->f_mode & FMODE_WRITE))
			return -EBADF;

		if (copy_from_user(&me,
			(struct move_extent __user *)arg, sizeof(me)))
			return -EFAULT;
		me.moved_len = 0;

		/* CLASS(fd) auto-releases the donor fd on scope exit. */
		CLASS(fd, donor)(me.donor_fd);
		if (fd_empty(donor))
			return -EBADF;

		if (!(fd_file(donor)->f_mode & FMODE_WRITE))
			return -EBADF;

		err = mnt_want_write_file(filp);
		if (err)
			return err;

		err = ext4_move_extents(filp, fd_file(donor), me.orig_start,
					me.donor_start, me.len, &me.moved_len);
		mnt_drop_write_file(filp);

		/* Report how much was actually moved, even on error. */
		if (copy_to_user((struct move_extent __user *)arg,
				 &me, sizeof(me)))
			err = -EFAULT;
		return err;
	}

	case EXT4_IOC_GROUP_ADD: {
		struct ext4_new_group_data input;

		if (copy_from_user(&input, (struct ext4_new_group_input __user *)arg,
				sizeof(input)))
			return -EFAULT;

		return ext4_ioctl_group_add(filp, &input);
	}

	case EXT4_IOC_MIGRATE:
	{
		/* Convert an indirect-block inode to extents format. */
		int err;
		if (!inode_owner_or_capable(idmap, inode))
			return -EACCES;

		err = mnt_want_write_file(filp);
		if (err)
			return err;
		/*
		 * inode_mutex prevent write and truncate on the file.
		 * Read still goes through. We take i_data_sem in
		 * ext4_ext_swap_inode_data before we switch the
		 * inode format to prevent read.
		 */
		inode_lock((inode));
		err = ext4_ext_migrate(inode);
		inode_unlock((inode));
		mnt_drop_write_file(filp);
		return err;
	}

	case EXT4_IOC_ALLOC_DA_BLKS:
	{
		/* Force allocation of delayed-allocation blocks. */
		int err;
		if (!inode_owner_or_capable(idmap, inode))
			return -EACCES;

		err = mnt_want_write_file(filp);
		if (err)
			return err;
		err = ext4_alloc_da_blocks(inode);
		mnt_drop_write_file(filp);
		return err;
	}

	case EXT4_IOC_SWAP_BOOT:
	{
		/* Swap this inode's data with the boot loader inode. */
		int err;
		if (!(filp->f_mode & FMODE_WRITE))
			return -EBADF;
		err = mnt_want_write_file(filp);
		if (err)
			return err;
		err = swap_inode_boot_loader(sb, idmap, inode);
		mnt_drop_write_file(filp);
		return err;
	}

	case EXT4_IOC_RESIZE_FS: {
		/* Full online resize to a new 64-bit block count. */
		ext4_fsblk_t n_blocks_count;
		int err = 0, err2 = 0;
		ext4_group_t o_group = EXT4_SB(sb)->s_groups_count;

		if (copy_from_user(&n_blocks_count, (__u64 __user *)arg,
				   sizeof(__u64))) {
			return -EFAULT;
		}

		err = ext4_resize_begin(sb);
		if (err)
			return err;

		err = mnt_want_write_file(filp);
		if (err)
			goto resizefs_out;

		err = ext4_resize_fs(sb, n_blocks_count);
		if (EXT4_SB(sb)->s_journal) {
			ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_RESIZE, NULL);
			jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
			err2 = jbd2_journal_flush(EXT4_SB(sb)->s_journal, 0);
			jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
		}
		if (err == 0)
			err = err2;
		mnt_drop_write_file(filp);
		/* New groups were added: let lazyinit zero their itables. */
		if (!err && (o_group < EXT4_SB(sb)->s_groups_count) &&
		    ext4_has_group_desc_csum(sb) &&
		    test_opt(sb, INIT_INODE_TABLE))
			err = ext4_register_li_request(sb, o_group);

resizefs_out:
		err2 = ext4_resize_end(sb, true);
		if (err == 0)
			err = err2;
		return err;
	}

	case FITRIM:
	{
		struct fstrim_range range;
		int ret = 0;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (!bdev_max_discard_sectors(sb->s_bdev))
			return -EOPNOTSUPP;

		/*
		 * We haven't replayed the journal, so we cannot use our
		 * block-bitmap-guided storage zapping commands.
		 */
		if (test_opt(sb, NOLOAD) && ext4_has_feature_journal(sb))
			return -EROFS;

		if (copy_from_user(&range, (struct fstrim_range __user *)arg,
		    sizeof(range)))
			return -EFAULT;

		ret = ext4_trim_fs(sb, &range);
		if (ret < 0)
			return ret;

		/* Return the number of bytes actually trimmed. */
		if (copy_to_user((struct fstrim_range __user *)arg, &range,
		    sizeof(range)))
			return -EFAULT;

		return 0;
	}
	case EXT4_IOC_PRECACHE_EXTENTS:
	{
		int ret;

		inode_lock_shared(inode);
		ret = ext4_ext_precache(inode);
		inode_unlock_shared(inode);
		return ret;
	}
	/* fscrypt ioctls: all require the encrypt feature. */
	case FS_IOC_SET_ENCRYPTION_POLICY:
		if (!ext4_has_feature_encrypt(sb))
			return -EOPNOTSUPP;
		return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);

	case FS_IOC_GET_ENCRYPTION_PWSALT:
		return ext4_ioctl_get_encryption_pwsalt(filp, (void __user *)arg);

	case FS_IOC_GET_ENCRYPTION_POLICY:
		if (!ext4_has_feature_encrypt(sb))
			return -EOPNOTSUPP;
		return fscrypt_ioctl_get_policy(filp, (void __user *)arg);

	case FS_IOC_GET_ENCRYPTION_POLICY_EX:
		if (!ext4_has_feature_encrypt(sb))
			return -EOPNOTSUPP;
		return fscrypt_ioctl_get_policy_ex(filp, (void __user *)arg);

	case FS_IOC_ADD_ENCRYPTION_KEY:
		if (!ext4_has_feature_encrypt(sb))
			return -EOPNOTSUPP;
		return fscrypt_ioctl_add_key(filp, (void __user *)arg);

	case FS_IOC_REMOVE_ENCRYPTION_KEY:
		if (!ext4_has_feature_encrypt(sb))
			return -EOPNOTSUPP;
		return fscrypt_ioctl_remove_key(filp, (void __user *)arg);

	case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
		if (!ext4_has_feature_encrypt(sb))
			return -EOPNOTSUPP;
		return fscrypt_ioctl_remove_key_all_users(filp,
							  (void __user *)arg);
	case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
		if (!ext4_has_feature_encrypt(sb))
			return -EOPNOTSUPP;
		return fscrypt_ioctl_get_key_status(filp, (void __user *)arg);

	case FS_IOC_GET_ENCRYPTION_NONCE:
		if (!ext4_has_feature_encrypt(sb))
			return -EOPNOTSUPP;
		return fscrypt_ioctl_get_nonce(filp, (void __user *)arg);

	case EXT4_IOC_CLEAR_ES_CACHE:
	{
		if (!inode_owner_or_capable(idmap, inode))
			return -EACCES;
		ext4_clear_inode_es(inode);
		return 0;
	}

	case EXT4_IOC_GETSTATE:
	{
		/* Translate in-core inode state bits to the uapi flags. */
		__u32 state = 0;

		if (ext4_test_inode_state(inode, EXT4_STATE_EXT_PRECACHED))
			state |= EXT4_STATE_FLAG_EXT_PRECACHED;
		if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
			state |= EXT4_STATE_FLAG_NEW;
		if (ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY))
			state |= EXT4_STATE_FLAG_NEWENTRY;
		if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE))
			state |= EXT4_STATE_FLAG_DA_ALLOC_CLOSE;

		return put_user(state, (__u32 __user *) arg);
	}

	case EXT4_IOC_GET_ES_CACHE:
		return ext4_ioctl_get_es_cache(filp, arg);

	case EXT4_IOC_SHUTDOWN:
		return ext4_ioctl_shutdown(sb, arg);

	/* fsverity ioctls: all require the verity feature. */
	case FS_IOC_ENABLE_VERITY:
		if (!ext4_has_feature_verity(sb))
			return -EOPNOTSUPP;
		return fsverity_ioctl_enable(filp, (const void __user *)arg);

	case FS_IOC_MEASURE_VERITY:
		if (!ext4_has_feature_verity(sb))
			return -EOPNOTSUPP;
		return fsverity_ioctl_measure(filp, (void __user *)arg);

	case FS_IOC_READ_VERITY_METADATA:
		if (!ext4_has_feature_verity(sb))
			return -EOPNOTSUPP;
		return fsverity_ioctl_read_metadata(filp,
						    (const void __user *)arg);

	case EXT4_IOC_CHECKPOINT:
		return ext4_ioctl_checkpoint(filp, arg);

	case FS_IOC_GETFSLABEL:
		return ext4_ioctl_getlabel(EXT4_SB(sb), (void __user *)arg);

	case FS_IOC_SETFSLABEL:
		return ext4_ioctl_setlabel(filp,
					   (const void __user *)arg);

	case EXT4_IOC_GETFSUUID:
		return ext4_ioctl_getuuid(EXT4_SB(sb), (void __user *)arg);
	case EXT4_IOC_SETFSUUID:
		return ext4_ioctl_setuuid(filp, (const void __user *)arg);
	case EXT4_IOC_GET_TUNE_SB_PARAM:
		return ext4_ioctl_get_tune_sb(EXT4_SB(sb),
					      (void __user *)arg);
	case EXT4_IOC_SET_TUNE_SB_PARAM:
		return ext4_ioctl_set_tune_sb(filp, (void __user *)arg);
	default:
		return -ENOTTY;
	}
}
1919
/* Public ioctl entry point; all work is done in __ext4_ioctl(). */
long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	return __ext4_ioctl(filp, cmd, arg);
}
1924
1925 #ifdef CONFIG_COMPAT
/*
 * 32-bit compat ioctl entry point: translate compat command numbers to
 * their native equivalents, handle the one struct whose layout differs
 * (EXT4_IOC32_GROUP_ADD), and forward everything else unchanged.
 */
long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	/* These are just misnamed, they actually get/put from/to user an int */
	switch (cmd) {
	case EXT4_IOC32_GETVERSION:
		cmd = EXT4_IOC_GETVERSION;
		break;
	case EXT4_IOC32_SETVERSION:
		cmd = EXT4_IOC_SETVERSION;
		break;
	case EXT4_IOC32_GROUP_EXTEND:
		cmd = EXT4_IOC_GROUP_EXTEND;
		break;
	case EXT4_IOC32_GETVERSION_OLD:
		cmd = EXT4_IOC_GETVERSION_OLD;
		break;
	case EXT4_IOC32_SETVERSION_OLD:
		cmd = EXT4_IOC_SETVERSION_OLD;
		break;
	case EXT4_IOC32_GETRSVSZ:
		cmd = EXT4_IOC_GETRSVSZ;
		break;
	case EXT4_IOC32_SETRSVSZ:
		cmd = EXT4_IOC_SETRSVSZ;
		break;
	case EXT4_IOC32_GROUP_ADD: {
		/* Widen the compat struct field-by-field, then dispatch. */
		struct compat_ext4_new_group_input __user *uinput;
		struct ext4_new_group_data input;
		int err;

		uinput = compat_ptr(arg);
		err = get_user(input.group, &uinput->group);
		err |= get_user(input.block_bitmap, &uinput->block_bitmap);
		err |= get_user(input.inode_bitmap, &uinput->inode_bitmap);
		err |= get_user(input.inode_table, &uinput->inode_table);
		err |= get_user(input.blocks_count, &uinput->blocks_count);
		err |= get_user(input.reserved_blocks,
				&uinput->reserved_blocks);
		if (err)
			return -EFAULT;
		return ext4_ioctl_group_add(file, &input);
	}
	/* These commands are layout-compatible between 32- and 64-bit. */
	case EXT4_IOC_MOVE_EXT:
	case EXT4_IOC_RESIZE_FS:
	case FITRIM:
	case EXT4_IOC_PRECACHE_EXTENTS:
	case FS_IOC_SET_ENCRYPTION_POLICY:
	case FS_IOC_GET_ENCRYPTION_PWSALT:
	case FS_IOC_GET_ENCRYPTION_POLICY:
	case FS_IOC_GET_ENCRYPTION_POLICY_EX:
	case FS_IOC_ADD_ENCRYPTION_KEY:
	case FS_IOC_REMOVE_ENCRYPTION_KEY:
	case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
	case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
	case FS_IOC_GET_ENCRYPTION_NONCE:
	case EXT4_IOC_SHUTDOWN:
	case FS_IOC_GETFSMAP:
	case FS_IOC_ENABLE_VERITY:
	case FS_IOC_MEASURE_VERITY:
	case FS_IOC_READ_VERITY_METADATA:
	case EXT4_IOC_CLEAR_ES_CACHE:
	case EXT4_IOC_GETSTATE:
	case EXT4_IOC_GET_ES_CACHE:
	case EXT4_IOC_CHECKPOINT:
	case FS_IOC_GETFSLABEL:
	case FS_IOC_SETFSLABEL:
	case EXT4_IOC_GETFSUUID:
	case EXT4_IOC_SETFSUUID:
		break;
	default:
		return -ENOIOCTLCMD;
	}
	return ext4_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
2000 #endif
2001
set_overhead(struct ext4_sb_info * sbi,struct ext4_super_block * es,const void * arg)2002 static void set_overhead(struct ext4_sb_info *sbi,
2003 struct ext4_super_block *es, const void *arg)
2004 {
2005 es->s_overhead_clusters = cpu_to_le32(*((unsigned long *) arg));
2006 }
2007
/*
 * Write the in-core overhead estimate to the superblock(s).  Skipped on
 * emergency/read-only filesystems, and — unless @force — when the
 * in-core value is zero or already matches what is on disk.
 */
int ext4_update_overhead(struct super_block *sb, bool force)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	bool in_sync;

	if (ext4_emergency_state(sb) || sb_rdonly(sb))
		return 0;

	in_sync = sbi->s_overhead == 0 ||
		  sbi->s_overhead == le32_to_cpu(sbi->s_es->s_overhead_clusters);
	if (in_sync && !force)
		return 0;

	return ext4_update_superblocks_fn(sb, set_overhead, &sbi->s_overhead);
}
2020